python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <elfutils/libdw.h>
int main(void)
{
Dwarf_Addr base, start, end;
Dwarf_Attribute attr;
Dwarf_Op *op;
size_t nops;
ptrdiff_t offset = 0;
return (int)dwarf_getlocations(&attr, offset, &base, &start, &end, &op, &nops);
}
| linux-master | tools/build/feature/test-dwarf_getlocations.c |
// SPDX-License-Identifier: GPL-2.0
#include <cpuid.h>
int main(void)
{
unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
}
| linux-master | tools/build/feature/test-get_cpuid.c |
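For context, perf's TSC code relies on this same leaf: CPUID 0x15 reports the TSC/core-crystal clock ratio (EBX/EAX) and the crystal frequency in Hz (ECX). A minimal sketch of that derivation follows; the tsc_hz_from_cpuid() helper name is ours, and it assumes a CPU that actually enumerates a non-zero crystal clock:

/*
 * Hedged sketch (not part of the feature test above): derive the TSC
 * frequency from CPUID leaf 0x15.
 */
#include <cpuid.h>
#include <stdio.h>

static unsigned long long tsc_hz_from_cpuid(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	/* EAX: ratio denominator, EBX: ratio numerator, ECX: crystal Hz */
	if (!__get_cpuid(0x15, &eax, &ebx, &ecx, &edx) || !eax || !ebx || !ecx)
		return 0; /* leaf absent, or ratio/crystal not enumerated */

	return (unsigned long long)ecx * ebx / eax;
}

int main(void)
{
	printf("TSC: %llu Hz\n", tsc_hz_from_cpuid());
	return 0;
}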
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#if !defined(__UCLIBC__)
#include <gnu/libc-version.h>
#else
#define XSTR(s) STR(s)
#define STR(s) #s
#endif
int main(void)
{
#if !defined(__UCLIBC__)
const char *version = gnu_get_libc_version();
#else
const char *version = XSTR(__GLIBC__) "." XSTR(__GLIBC_MINOR__);
#endif
return (long)version;
}
| linux-master | tools/build/feature/test-glibc.c |
// SPDX-License-Identifier: GPL-2.0
#include <EXTERN.h>
#include <perl.h>
int main(void)
{
perl_alloc();
return 0;
}
| linux-master | tools/build/feature/test-libperl.c |
// SPDX-License-Identifier: GPL-2.0
#include <zstd.h>
int main(void)
{
ZSTD_CStream *cstream;
cstream = ZSTD_createCStream();
ZSTD_freeCStream(cstream);
return 0;
}
| linux-master | tools/build/feature/test-libzstd.c |
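The probe above only checks that the streaming symbols link; libzstd's one-shot simple API is often all a tool needs. A hedged sketch, separate from the feature test, using only stable public calls (ZSTD_compress, ZSTD_isError, ZSTD_getErrorName):

/* Hedged sketch: one-shot compression with libzstd's simple API. */
#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
	const char src[] = "hello hello hello hello hello hello";
	char dst[256]; /* comfortably >= ZSTD_compressBound() for tiny inputs */
	size_t n = ZSTD_compress(dst, sizeof(dst), src, strlen(src), 3);

	if (ZSTD_isError(n)) {
		fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(n));
		return 1;
	}
	printf("%zu -> %zu bytes\n", strlen(src), n);
	return 0;
}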
// SPDX-License-Identifier: GPL-2.0
#include <libunwind-x86_64.h>
#include <stdlib.h>
extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
unw_word_t ip,
unw_dyn_info_t *di,
unw_proc_info_t *pi,
int need_unwind_info, void *arg);
#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
static unw_accessors_t accessors;
int main(void)
{
unw_addr_space_t addr_space;
addr_space = unw_create_addr_space(&accessors, 0);
if (addr_space)
return 0;
unw_init_remote(NULL, addr_space, NULL);
dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
return 0;
}
| linux-master | tools/build/feature/test-libunwind-x86_64.c |
// SPDX-License-Identifier: GPL-2.0
#include <tracefs/tracefs.h>
int main(void)
{
struct tracefs_instance *inst = tracefs_instance_create("dummy");
tracefs_instance_destroy(inst);
return 0;
}
| linux-master | tools/build/feature/test-libtracefs.c |
// SPDX-License-Identifier: GPL-2.0
#include <aio.h>
int main(void)
{
struct aiocb aiocb;
aiocb.aio_fildes = 0;
aiocb.aio_offset = 0;
aiocb.aio_buf = 0;
aiocb.aio_nbytes = 0;
aiocb.aio_reqprio = 0;
aiocb.aio_sigevent.sigev_notify = 1 /*SIGEV_NONE*/;
return (int)aio_return(&aiocb);
}
| linux-master | tools/build/feature/test-libaio.c |
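The test above only type-checks struct aiocb. A minimal usage sketch of the same POSIX AIO interface is shown below; /etc/hostname is an arbitrary example path, the polling loop is only to keep the sketch short (real code would use aio_sigevent notification), and older glibc needs -lrt:

/* Hedged usage sketch of POSIX AIO: queue a read, poll for completion. */
#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	struct aiocb cb;
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf    = buf;
	cb.aio_nbytes = sizeof(buf);

	if (aio_read(&cb))			/* queue the asynchronous read */
		return 1;
	while (aio_error(&cb) == EINPROGRESS)
		usleep(1000);			/* poll until it completes */

	printf("read %zd bytes\n", aio_return(&cb));
	close(fd);
	return 0;
}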
// SPDX-License-Identifier: GPL-2.0
#include <dwarf.h>
#include <elfutils/libdw.h>
#include <elfutils/version.h>
int main(void)
{
Dwarf *dbg = dwarf_begin(0, DWARF_C_READ);
return (long)dbg;
}
| linux-master | tools/build/feature/test-dwarf.c |
// SPDX-License-Identifier: GPL-2.0
#include <Python.h>
int main(void)
{
Py_Initialize();
return 0;
}
| linux-master | tools/build/feature/test-libpython.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
#define _GNU_SOURCE
#include <unistd.h>
int main(void)
{
return gettid();
}
#undef _GNU_SOURCE
| linux-master | tools/build/feature/test-gettid.c |
// SPDX-License-Identifier: GPL-2.0
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sched.h>
int main(void)
{
return sched_getcpu();
}
#undef _GNU_SOURCE
| linux-master | tools/build/feature/test-sched_getcpu.c |
// SPDX-License-Identifier: GPL-2.0
#include <jvmti.h>
int main(void)
{
JavaVM jvm __attribute__((unused));
jvmtiEventCallbacks cb __attribute__((unused));
jvmtiCapabilities caps __attribute__((unused));
jvmtiJlocationFormat format __attribute__((unused));
jvmtiEnv jvmti __attribute__((unused));
return 0;
}
| linux-master | tools/build/feature/test-jvmti.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
struct test {
int a;
int b;
} __attribute__((preserve_access_index));
volatile struct test global_value_for_test = {};
| linux-master | tools/build/feature/test-clang-bpf-co-re.c |
// SPDX-License-Identifier: GPL-2.0
#include <asm/unistd.h>
#include <linux/bpf.h>
#include <unistd.h>
#ifndef __NR_bpf
# if defined(__i386__)
# define __NR_bpf 357
# elif defined(__x86_64__)
# define __NR_bpf 321
# elif defined(__aarch64__)
# define __NR_bpf 280
# elif defined(__sparc__)
# define __NR_bpf 349
# elif defined(__s390__)
# define __NR_bpf 351
# elif defined(__mips__) && defined(_ABIO32)
# define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
# define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
# define __NR_bpf 5315
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif
int main(void)
{
union bpf_attr attr;
/* Check fields in attr */
attr.prog_type = BPF_PROG_TYPE_KPROBE;
attr.insn_cnt = 0;
attr.insns = 0;
attr.license = 0;
attr.log_buf = 0;
attr.log_size = 0;
attr.log_level = 0;
attr.kern_version = 0;
attr.prog_flags = 0;
/*
* Test existence of __NR_bpf and BPF_PROG_LOAD.
* This call should fail if we run the testcase.
*/
return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
| linux-master | tools/build/feature/test-bpf.c |
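The probe above deliberately loads nothing valid. For contrast, a hedged sketch of a BPF_PROG_LOAD that can actually succeed (given sufficient privileges) loads a two-instruction "return 0" socket filter; this is a common minimal pattern, not part of the feature test:

/* Hedged sketch: minimal successful BPF_PROG_LOAD of a "return 0" filter. */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0 }, /* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },				/* exit   */
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));	/* unused attr bytes must be zero */
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insn_cnt  = 2;
	attr.insns     = (__u64)(unsigned long)insns;
	attr.license   = (__u64)(unsigned long)"GPL";

	/* Returns a program fd on success, -1 with errno set on failure. */
	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)) < 0;
}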
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/debuginfod.h>
int main(void)
{
debuginfod_client* c = debuginfod_begin();
return (long)c;
}
| linux-master | tools/build/feature/test-libdebuginfod.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
int main(void)
{
printf("Hello World!\n");
return 0;
}
| linux-master | tools/build/feature/test-compile.c |
// SPDX-License-Identifier: GPL-2.0
/*
* test-all.c: Try to build all the main testcases at once.
*
* A well-configured system will have all the prereqs installed, so we can speed
* up auto-detection on such systems.
*/
/*
* Quirk: Python and Perl headers cannot be in arbitrary places, so keep
* these 3 testcases at the top:
*/
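/*
 * Technique used below: each testcase is included with its main()
 * renamed via the preprocessor (main -> main_test_<feature>), so every
 * feature test links into this single binary and is invoked from the
 * real main() at the bottom of this file.
 */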
#define main main_test_libpython
# include "test-libpython.c"
#undef main
#define main main_test_libperl
# include "test-libperl.c"
#undef main
#define main main_test_hello
# include "test-hello.c"
#undef main
#define main main_test_libelf
# include "test-libelf.c"
#undef main
#define main main_test_get_current_dir_name
# include "test-get_current_dir_name.c"
#undef main
#define main main_test_gettid
# include "test-gettid.c"
#undef main
#define main main_test_glibc
# include "test-glibc.c"
#undef main
#define main main_test_dwarf
# include "test-dwarf.c"
#undef main
#define main main_test_dwarf_getlocations
# include "test-dwarf_getlocations.c"
#undef main
#define main main_test_eventfd
# include "test-eventfd.c"
#undef main
#define main main_test_libelf_getphdrnum
# include "test-libelf-getphdrnum.c"
#undef main
#define main main_test_libelf_gelf_getnote
# include "test-libelf-gelf_getnote.c"
#undef main
#define main main_test_libelf_getshdrstrndx
# include "test-libelf-getshdrstrndx.c"
#undef main
#define main main_test_libunwind
# include "test-libunwind.c"
#undef main
#define main main_test_libslang
# include "test-libslang.c"
#undef main
#define main main_test_libbfd
# include "test-libbfd.c"
#undef main
#define main main_test_libbfd_buildid
# include "test-libbfd-buildid.c"
#undef main
#define main main_test_backtrace
# include "test-backtrace.c"
#undef main
#define main main_test_libnuma
# include "test-libnuma.c"
#undef main
#define main main_test_numa_num_possible_cpus
# include "test-numa_num_possible_cpus.c"
#undef main
#define main main_test_timerfd
# include "test-timerfd.c"
#undef main
#define main main_test_stackprotector_all
# include "test-stackprotector-all.c"
#undef main
#define main main_test_libdw_dwarf_unwind
# include "test-libdw-dwarf-unwind.c"
#undef main
#define main main_test_zlib
# include "test-zlib.c"
#undef main
#define main main_test_pthread_attr_setaffinity_np
# include "test-pthread-attr-setaffinity-np.c"
#undef main
#define main main_test_pthread_barrier
# include "test-pthread-barrier.c"
#undef main
#define main main_test_scandirat
# include "test-scandirat.c"
#undef main
#define main main_test_sched_getcpu
# include "test-sched_getcpu.c"
#undef main
# if 0
/*
* Disable libbabeltrace check for test-all, because the requested
* library version is not released yet in most distributions. Will
* reenable later.
*/
#define main main_test_libbabeltrace
# include "test-libbabeltrace.c"
#undef main
#endif
#define main main_test_lzma
# include "test-lzma.c"
#undef main
#define main main_test_get_cpuid
# include "test-get_cpuid.c"
#undef main
#define main main_test_bpf
# include "test-bpf.c"
#undef main
#define main main_test_libcrypto
# include "test-libcrypto.c"
#undef main
#define main main_test_sdt
# include "test-sdt.c"
#undef main
#define main main_test_setns
# include "test-setns.c"
#undef main
#define main main_test_libaio
# include "test-libaio.c"
#undef main
#define main main_test_reallocarray
# include "test-reallocarray.c"
#undef main
#define main main_test_disassembler_four_args
# include "test-disassembler-four-args.c"
#undef main
#define main main_test_disassembler_init_styled
# include "test-disassembler-init-styled.c"
#undef main
#define main main_test_libzstd
# include "test-libzstd.c"
#undef main
int main(int argc, char *argv[])
{
main_test_libpython();
main_test_libperl();
main_test_hello();
main_test_libelf();
main_test_get_current_dir_name();
main_test_gettid();
main_test_glibc();
main_test_dwarf();
main_test_dwarf_getlocations();
main_test_eventfd();
main_test_libelf_getphdrnum();
main_test_libelf_gelf_getnote();
main_test_libelf_getshdrstrndx();
main_test_libunwind();
main_test_libslang();
main_test_libbfd();
main_test_libbfd_buildid();
main_test_backtrace();
main_test_libnuma();
main_test_numa_num_possible_cpus();
main_test_timerfd();
main_test_stackprotector_all();
main_test_libdw_dwarf_unwind();
main_test_zlib();
main_test_pthread_attr_setaffinity_np();
main_test_pthread_barrier();
main_test_lzma();
main_test_get_cpuid();
main_test_bpf();
main_test_libcrypto();
main_test_scandirat();
main_test_sched_getcpu();
main_test_sdt();
main_test_setns();
main_test_libaio();
main_test_reallocarray();
main_test_disassembler_four_args();
main_test_libzstd();
return 0;
}
| linux-master | tools/build/feature/test-all.c |
// SPDX-License-Identifier: GPL-2.0
#include <slang/slang.h>
int main(void)
{
return SLsmg_init_smg();
}
| linux-master | tools/build/feature/test-libslang-include-subdir.c |
// SPDX-License-Identifier: GPL-2.0
#include <libunwind-aarch64.h>
#include <stdlib.h>
extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
unw_word_t ip,
unw_dyn_info_t *di,
unw_proc_info_t *pi,
int need_unwind_info, void *arg);
#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
static unw_accessors_t accessors;
int main(void)
{
unw_addr_space_t addr_space;
addr_space = unw_create_addr_space(&accessors, 0);
if (addr_space)
return 0;
unw_init_remote(NULL, addr_space, NULL);
dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
return 0;
}
| linux-master | tools/build/feature/test-libunwind-aarch64.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
int main(void)
{
return puts("hi");
}
| linux-master | tools/build/feature/test-fortify-source.c |
// SPDX-License-Identifier: GPL-2.0
#include <libunwind-aarch64.h>
#include <stdlib.h>
extern int
UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
unw_word_t ip, unw_word_t segbase,
const char *obj_name, unw_word_t start,
unw_word_t end);
#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
int main(void)
{
dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
return 0;
}
| linux-master | tools/build/feature/test-libunwind-debug-frame-aarch64.c |
// SPDX-License-Identifier: GPL-2.0
#include <zlib.h>
int main(void)
{
z_stream zs;
inflateInit(&zs);
return 0;
}
| linux-master | tools/build/feature/test-zlib.c |
// SPDX-License-Identifier: GPL-2.0
#include <jvmti.h>
#include <jvmticmlr.h>
int main(void)
{
jvmtiCompiledMethodLoadInlineRecord rec __attribute__((unused));
jvmtiCompiledMethodLoadRecordHeader hdr __attribute__((unused));
PCStackInfo p __attribute__((unused));
return 0;
}
| linux-master | tools/build/feature/test-jvmti-cmlr.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
int main(void)
{
return puts("hi");
}
| linux-master | tools/build/feature/test-stackprotector-all.c |
// SPDX-License-Identifier: GPL-2.0
#include <libunwind-arm.h>
#include <stdlib.h>
extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
unw_word_t ip,
unw_dyn_info_t *di,
unw_proc_info_t *pi,
int need_unwind_info, void *arg);
#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
static unw_accessors_t accessors;
int main(void)
{
unw_addr_space_t addr_space;
addr_space = unw_create_addr_space(&accessors, 0);
if (addr_space)
return 0;
unw_init_remote(NULL, addr_space, NULL);
dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
return 0;
}
| linux-master | tools/build/feature/test-libunwind-arm.c |
// SPDX-License-Identifier: GPL-2.0
#include <libunwind-arm.h>
#include <stdlib.h>
extern int
UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
unw_word_t ip, unw_word_t segbase,
const char *obj_name, unw_word_t start,
unw_word_t end);
#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
int main(void)
{
dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
return 0;
}
| linux-master | tools/build/feature/test-libunwind-debug-frame-arm.c |
// SPDX-License-Identifier: GPL-2.0
#include <sys/types.h>
#include <perfmon/pfmlib.h>
int main(void)
{
pfm_initialize();
return 0;
}
| linux-master | tools/build/feature/test-libpfm4.c |
// SPDX-License-Identifier: GPL-2.0
#include <libelf.h>
int main(void)
{
Elf *elf = elf_begin(0, ELF_C_READ, 0);
return (long)elf;
}
| linux-master | tools/build/feature/test-libelf.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <gelf.h>
int main(void)
{
return gelf_getnote(NULL, 0, NULL, NULL, NULL);
}
| linux-master | tools/build/feature/test-libelf-gelf_getnote.c |
// SPDX-License-Identifier: GPL-2.0
#include <libaudit.h>
extern int printf(const char *format, ...);
int main(void)
{
printf("error message: %s\n", audit_errno_to_name(0));
return audit_open();
}
| linux-master | tools/build/feature/test-libaudit.c |
// SPDX-License-Identifier: GPL-2.0
#include <android/api-level.h>
int main(void)
{
return __ANDROID_API__;
}
| linux-master | tools/build/feature/test-bionic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* test for timerfd functions used by perf-kvm-stat-live
*/
#include <sys/timerfd.h>
int main(void)
{
struct itimerspec new_value;
int fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
if (fd < 0)
return 1;
if (timerfd_settime(fd, 0, &new_value, NULL) != 0)
return 1;
return 0;
}
| linux-master | tools/build/feature/test-timerfd.c |
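A usage note: the feature test never reads the timer back. In real code the fd becomes readable on expiry and read() yields the expiration count as a uint64_t, as in this hedged sketch:

/* Hedged sketch: block on a one-shot timerfd and read the expiry count. */
#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <unistd.h>

int main(void)
{
	struct itimerspec its = { .it_value = { .tv_sec = 1 } }; /* one-shot, 1s */
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) != 0)
		return 1;
	if (read(fd, &expirations, sizeof(expirations)) != sizeof(expirations))
		return 1;

	printf("expired %llu time(s)\n", (unsigned long long)expirations);
	close(fd);
	return 0;
}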
// SPDX-License-Identifier: GPL-2.0
#include <bpf/libbpf.h>
#if !defined(LIBBPF_MAJOR_VERSION) || (LIBBPF_MAJOR_VERSION < 1)
#error At least libbpf 1.0 is required for Linux tools.
#endif
int main(void)
{
return bpf_object__open("test") ? 0 : -1;
}
| linux-master | tools/build/feature/test-libbpf.c |
// SPDX-License-Identifier: GPL-2.0
#include <bfd.h>
int main(void)
{
bfd *abfd = bfd_openr("Pedro", 0);
return abfd && (!abfd->build_id || abfd->build_id->size > 0x506564726f);
}
| linux-master | tools/build/feature/test-libbfd-buildid.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <dis-asm.h>
int main(void)
{
struct disassemble_info info;
init_disassemble_info(&info, stdout,
NULL, NULL);
return 0;
}
| linux-master | tools/build/feature/test-disassembler-init-styled.c |
// SPDX-License-Identifier: GPL-2.0
#include <libunwind-x86.h>
#include <stdlib.h>
extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
unw_word_t ip,
unw_dyn_info_t *di,
unw_proc_info_t *pi,
int need_unwind_info, void *arg);
#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
static unw_accessors_t accessors;
int main(void)
{
unw_addr_space_t addr_space;
addr_space = unw_create_addr_space(&accessors, 0);
if (addr_space)
return 0;
unw_init_remote(NULL, addr_space, NULL);
dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
return 0;
}
| linux-master | tools/build/feature/test-libunwind-x86.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Disk protection for HP/DELL machines.
*
* Copyright 2008 Eric Piel
* Copyright 2009 Pavel Machek <[email protected]>
* Copyright 2012 Sonal Santan
* Copyright 2014 Pali Rohár <[email protected]>
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <signal.h>
#include <sys/mman.h>
#include <sched.h>
#include <syslog.h>
static int noled;
static char unload_heads_path[64];
static char device_path[32];
static const char app_name[] = "FREE FALL";
static int set_unload_heads_path(char *device)
{
if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0)
return -EINVAL;
strncpy(device_path, device, sizeof(device_path) - 1);
snprintf(unload_heads_path, sizeof(unload_heads_path) - 1,
"/sys/block/%s/device/unload_heads", device+5);
return 0;
}
static int valid_disk(void)
{
int fd = open(unload_heads_path, O_RDONLY);
if (fd < 0) {
perror(unload_heads_path);
return 0;
}
close(fd);
return 1;
}
static void write_int(char *path, int i)
{
char buf[1024];
int fd = open(path, O_RDWR);
if (fd < 0) {
perror("open");
exit(1);
}
sprintf(buf, "%d", i);
if (write(fd, buf, strlen(buf)) != strlen(buf)) {
perror("write");
exit(1);
}
close(fd);
}
static void set_led(int on)
{
if (noled)
return;
write_int("/sys/class/leds/hp::hddprotect/brightness", on);
}
static void protect(int seconds)
{
const char *str = (seconds == 0) ? "Unparked" : "Parked";
write_int(unload_heads_path, seconds*1000);
syslog(LOG_INFO, "%s %s disk head\n", str, device_path);
}
static int on_ac(void)
{
/* /sys/class/power_supply/AC0/online */
return 1;
}
static int lid_open(void)
{
/* /proc/acpi/button/lid/LID/state */
return 1;
}
static void ignore_me(int signum)
{
protect(0);
set_led(0);
}
int main(int argc, char **argv)
{
int fd, ret;
struct stat st;
struct sched_param param;
if (argc == 1)
ret = set_unload_heads_path("/dev/sda");
else if (argc == 2)
ret = set_unload_heads_path(argv[1]);
else
ret = -EINVAL;
if (ret || !valid_disk()) {
fprintf(stderr, "usage: %s <device> (default: /dev/sda)\n",
argv[0]);
exit(1);
}
fd = open("/dev/freefall", O_RDONLY);
if (fd < 0) {
perror("/dev/freefall");
return EXIT_FAILURE;
}
if (stat("/sys/class/leds/hp::hddprotect/brightness", &st))
noled = 1;
if (daemon(0, 0) != 0) {
perror("daemon");
return EXIT_FAILURE;
}
openlog(app_name, LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL1);
param.sched_priority = sched_get_priority_max(SCHED_FIFO);
sched_setscheduler(0, SCHED_FIFO, &param);
mlockall(MCL_CURRENT|MCL_FUTURE);
signal(SIGALRM, ignore_me);
for (;;) {
unsigned char count;
ret = read(fd, &count, sizeof(count));
alarm(0);
if ((ret == -1) && (errno == EINTR)) {
/* Alarm expired, time to unpark the heads */
continue;
}
if (ret != sizeof(count)) {
perror("read");
break;
}
protect(21);
set_led(1);
if (1 || on_ac() || lid_open())
alarm(2);
else
alarm(20);
}
closelog();
close(fd);
return EXIT_SUCCESS;
}
| linux-master | tools/laptop/freefall/freefall.c |
/*
* dslm.c
* Simple Disk Sleep Monitor
* by Bartek Kania
* Licensed under the GPL
*/
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <time.h>
#include <string.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>
#ifdef DEBUG
#define D(x) x
#else
#define D(x)
#endif
int endit = 0;
/* Check if the disk is in powersave-mode
* Most of the code is stolen from hdparm.
* 1 = active, 0 = standby/sleep, -1 = unknown */
static int check_powermode(int fd)
{
unsigned char args[4] = {WIN_CHECKPOWERMODE1,0,0,0};
int state;
if (ioctl(fd, HDIO_DRIVE_CMD, &args)
&& (args[0] = WIN_CHECKPOWERMODE2) /* try again with 0x98 */
&& ioctl(fd, HDIO_DRIVE_CMD, &args)) {
if (errno != EIO || args[0] != 0 || args[1] != 0) {
state = -1; /* "unknown"; */
} else
state = 0; /* "sleeping"; */
} else {
state = (args[2] == 255) ? 1 : 0;
}
D(printf(" drive state is: %d\n", state));
return state;
}
static char *state_name(int i)
{
if (i == -1) return "unknown";
if (i == 0) return "sleeping";
if (i == 1) return "active";
return "internal error";
}
static char *myctime(time_t time)
{
char *ts = ctime(&time);
ts[strlen(ts) - 1] = 0;
return ts;
}
static void measure(int fd)
{
time_t start_time;
int last_state;
time_t last_time;
int curr_state;
time_t curr_time = 0;
time_t time_diff;
time_t active_time = 0;
time_t sleep_time = 0;
time_t unknown_time = 0;
time_t total_time = 0;
int changes = 0;
float tmp;
printf("Starting measurements\n");
last_state = check_powermode(fd);
start_time = last_time = time(0);
printf(" System is in state %s\n\n", state_name(last_state));
while(!endit) {
sleep(1);
curr_state = check_powermode(fd);
if (curr_state != last_state || endit) {
changes++;
curr_time = time(0);
time_diff = curr_time - last_time;
if (last_state == 1) active_time += time_diff;
else if (last_state == 0) sleep_time += time_diff;
else unknown_time += time_diff;
last_state = curr_state;
last_time = curr_time;
printf("%s: State-change to %s\n", myctime(curr_time),
state_name(curr_state));
}
}
changes--; /* Compensate for SIGINT */
total_time = time(0) - start_time;
printf("\nTotal running time: %lus\n", curr_time - start_time);
printf(" State changed %d times\n", changes);
tmp = (float)sleep_time / (float)total_time * 100;
printf(" Time in sleep state: %lus (%.2f%%)\n", sleep_time, tmp);
tmp = (float)active_time / (float)total_time * 100;
printf(" Time in active state: %lus (%.2f%%)\n", active_time, tmp);
tmp = (float)unknown_time / (float)total_time * 100;
printf(" Time in unknown state: %lus (%.2f%%)\n", unknown_time, tmp);
}
static void ender(int s)
{
endit = 1;
}
static void usage(void)
{
puts("usage: dslm [-w <time>] <disk>");
exit(0);
}
int main(int argc, char **argv)
{
int fd;
char *disk = 0;
int settle_time = 60;
/* Parse the simple command-line */
if (argc == 2)
disk = argv[1];
else if (argc == 4) {
settle_time = atoi(argv[2]);
disk = argv[3];
} else
usage();
if ((fd = open(disk, O_RDONLY|O_NONBLOCK)) < 0) {
printf("Can't open %s, because: %s\n", disk, strerror(errno));
exit(-1);
}
if (settle_time) {
printf("Waiting %d seconds for the system to settle down to "
"'normal'\n", settle_time);
sleep(settle_time);
} else
puts("Not waiting for system to settle down");
signal(SIGINT, ender);
measure(fd);
close(fd);
return 0;
}
| linux-master | tools/laptop/dslm/dslm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This is rewrite of original c2c tool introduced in here:
* http://lwn.net/Articles/588866/
*
* The original tool was changed to fit in current perf state.
*
* Original authors:
* Don Zickus <[email protected]>
* Dick Fowles <[email protected]>
* Joe Mario <[email protected]>
*/
#include <errno.h>
#include <inttypes.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
#include <asm/bug.h>
#include <sys/param.h>
#include "debug.h"
#include "builtin.h"
#include <perf/cpumap.h>
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "map_symbol.h"
#include "mem-events.h"
#include "session.h"
#include "hist.h"
#include "sort.h"
#include "tool.h"
#include "cacheline.h"
#include "data.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "ui/browsers/hists.h"
#include "thread.h"
#include "mem2node.h"
#include "symbol.h"
#include "ui/ui.h"
#include "ui/progress.h"
#include "pmus.h"
#include "string2.h"
#include "util/util.h"
struct c2c_hists {
struct hists hists;
struct perf_hpp_list list;
struct c2c_stats stats;
};
struct compute_stats {
struct stats lcl_hitm;
struct stats rmt_hitm;
struct stats lcl_peer;
struct stats rmt_peer;
struct stats load;
};
struct c2c_hist_entry {
struct c2c_hists *hists;
struct c2c_stats stats;
unsigned long *cpuset;
unsigned long *nodeset;
struct c2c_stats *node_stats;
unsigned int cacheline_idx;
struct compute_stats cstats;
unsigned long paddr;
unsigned long paddr_cnt;
bool paddr_zero;
char *nodestr;
/*
* must be at the end,
* because of its callchain dynamic entry
*/
struct hist_entry he;
};
static char const *coalesce_default = "iaddr";
struct perf_c2c {
struct perf_tool tool;
struct c2c_hists hists;
struct mem2node mem2node;
unsigned long **nodes;
int nodes_cnt;
int cpus_cnt;
int *cpu2node;
int node_info;
bool show_src;
bool show_all;
bool use_stdio;
bool stats_only;
bool symbol_full;
bool stitch_lbr;
/* Shared cache line stats */
struct c2c_stats shared_clines_stats;
int shared_clines;
int display;
const char *coalesce;
char *cl_sort;
char *cl_resort;
char *cl_output;
};
enum {
DISPLAY_LCL_HITM,
DISPLAY_RMT_HITM,
DISPLAY_TOT_HITM,
DISPLAY_SNP_PEER,
DISPLAY_MAX,
};
static const char *display_str[DISPLAY_MAX] = {
[DISPLAY_LCL_HITM] = "Local HITMs",
[DISPLAY_RMT_HITM] = "Remote HITMs",
[DISPLAY_TOT_HITM] = "Total HITMs",
[DISPLAY_SNP_PEER] = "Peer Snoop",
};
static const struct option c2c_options[] = {
OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"),
OPT_END()
};
static struct perf_c2c c2c;
static void *c2c_he_zalloc(size_t size)
{
struct c2c_hist_entry *c2c_he;
c2c_he = zalloc(size + sizeof(*c2c_he));
if (!c2c_he)
return NULL;
c2c_he->cpuset = bitmap_zalloc(c2c.cpus_cnt);
if (!c2c_he->cpuset)
goto out_free;
c2c_he->nodeset = bitmap_zalloc(c2c.nodes_cnt);
if (!c2c_he->nodeset)
goto out_free;
c2c_he->node_stats = zalloc(c2c.nodes_cnt * sizeof(*c2c_he->node_stats));
if (!c2c_he->node_stats)
goto out_free;
init_stats(&c2c_he->cstats.lcl_hitm);
init_stats(&c2c_he->cstats.rmt_hitm);
init_stats(&c2c_he->cstats.lcl_peer);
init_stats(&c2c_he->cstats.rmt_peer);
init_stats(&c2c_he->cstats.load);
return &c2c_he->he;
out_free:
zfree(&c2c_he->nodeset);
zfree(&c2c_he->cpuset);
free(c2c_he);
return NULL;
}
static void c2c_he_free(void *he)
{
struct c2c_hist_entry *c2c_he;
c2c_he = container_of(he, struct c2c_hist_entry, he);
if (c2c_he->hists) {
hists__delete_entries(&c2c_he->hists->hists);
zfree(&c2c_he->hists);
}
zfree(&c2c_he->cpuset);
zfree(&c2c_he->nodeset);
zfree(&c2c_he->nodestr);
zfree(&c2c_he->node_stats);
free(c2c_he);
}
static struct hist_entry_ops c2c_entry_ops = {
.new = c2c_he_zalloc,
.free = c2c_he_free,
};
static int c2c_hists__init(struct c2c_hists *hists,
const char *sort,
int nr_header_lines);
static struct c2c_hists*
he__get_c2c_hists(struct hist_entry *he,
const char *sort,
int nr_header_lines)
{
struct c2c_hist_entry *c2c_he;
struct c2c_hists *hists;
int ret;
c2c_he = container_of(he, struct c2c_hist_entry, he);
if (c2c_he->hists)
return c2c_he->hists;
hists = c2c_he->hists = zalloc(sizeof(*hists));
if (!hists)
return NULL;
ret = c2c_hists__init(hists, sort, nr_header_lines);
if (ret) {
free(hists);
return NULL;
}
return hists;
}
static void c2c_he__set_cpu(struct c2c_hist_entry *c2c_he,
struct perf_sample *sample)
{
if (WARN_ONCE(sample->cpu == (unsigned int) -1,
"WARNING: no sample cpu value"))
return;
__set_bit(sample->cpu, c2c_he->cpuset);
}
static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
struct perf_sample *sample)
{
int node;
if (!sample->phys_addr) {
c2c_he->paddr_zero = true;
return;
}
node = mem2node__node(&c2c.mem2node, sample->phys_addr);
if (WARN_ONCE(node < 0, "WARNING: failed to find node\n"))
return;
__set_bit(node, c2c_he->nodeset);
if (c2c_he->paddr != sample->phys_addr) {
c2c_he->paddr_cnt++;
c2c_he->paddr = sample->phys_addr;
}
}
static void compute_stats(struct c2c_hist_entry *c2c_he,
struct c2c_stats *stats,
u64 weight)
{
struct compute_stats *cstats = &c2c_he->cstats;
if (stats->rmt_hitm)
update_stats(&cstats->rmt_hitm, weight);
else if (stats->lcl_hitm)
update_stats(&cstats->lcl_hitm, weight);
else if (stats->rmt_peer)
update_stats(&cstats->rmt_peer, weight);
else if (stats->lcl_peer)
update_stats(&cstats->lcl_peer, weight);
else if (stats->load)
update_stats(&cstats->load, weight);
}
static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct c2c_hists *c2c_hists = &c2c.hists;
struct c2c_hist_entry *c2c_he;
struct c2c_stats stats = { .nr_entries = 0, };
struct hist_entry *he;
struct addr_location al;
struct mem_info *mi, *mi_dup;
struct callchain_cursor *cursor;
int ret;
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
ret = -1;
goto out;
}
if (c2c.stitch_lbr)
thread__set_lbr_stitch_enable(al.thread, true);
cursor = get_tls_callchain_cursor();
ret = sample__resolve_callchain(sample, cursor, NULL,
evsel, &al, sysctl_perf_event_max_stack);
if (ret)
goto out;
mi = sample__resolve_mem(sample, &al);
if (mi == NULL) {
ret = -ENOMEM;
goto out;
}
/*
* The mi object is released in hists__add_entry_ops,
* if it gets sorted out into existing data, so we need
* to take the copy now.
*/
mi_dup = mem_info__get(mi);
c2c_decode_stats(&stats, mi);
he = hists__add_entry_ops(&c2c_hists->hists, &c2c_entry_ops,
&al, NULL, NULL, mi, NULL,
sample, true);
if (he == NULL)
goto free_mi;
c2c_he = container_of(he, struct c2c_hist_entry, he);
c2c_add_stats(&c2c_he->stats, &stats);
c2c_add_stats(&c2c_hists->stats, &stats);
c2c_he__set_cpu(c2c_he, sample);
c2c_he__set_node(c2c_he, sample);
hists__inc_nr_samples(&c2c_hists->hists, he->filtered);
ret = hist_entry__append_callchain(he, sample);
if (!ret) {
/*
* There's already been warning about missing
* sample's cpu value. Let's account all to
* node 0 in this case, without any further
* warning.
*
* Doing node stats only for single callchain data.
*/
int cpu = sample->cpu == (unsigned int) -1 ? 0 : sample->cpu;
int node = c2c.cpu2node[cpu];
mi = mi_dup;
c2c_hists = he__get_c2c_hists(he, c2c.cl_sort, 2);
if (!c2c_hists)
goto free_mi;
he = hists__add_entry_ops(&c2c_hists->hists, &c2c_entry_ops,
&al, NULL, NULL, mi, NULL,
sample, true);
if (he == NULL)
goto free_mi;
c2c_he = container_of(he, struct c2c_hist_entry, he);
c2c_add_stats(&c2c_he->stats, &stats);
c2c_add_stats(&c2c_hists->stats, &stats);
c2c_add_stats(&c2c_he->node_stats[node], &stats);
compute_stats(c2c_he, &stats, sample->weight);
c2c_he__set_cpu(c2c_he, sample);
c2c_he__set_node(c2c_he, sample);
hists__inc_nr_samples(&c2c_hists->hists, he->filtered);
ret = hist_entry__append_callchain(he, sample);
}
out:
addr_location__exit(&al);
return ret;
free_mi:
mem_info__put(mi_dup);
mem_info__put(mi);
ret = -ENOMEM;
goto out;
}
static struct perf_c2c c2c = {
.tool = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.comm = perf_event__process_comm,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
.attr = perf_event__process_attr,
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.auxtrace_error = perf_event__process_auxtrace_error,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
};
static const char * const c2c_usage[] = {
"perf c2c {record|report}",
NULL
};
static const char * const __usage_report[] = {
"perf c2c report",
NULL
};
static const char * const *report_c2c_usage = __usage_report;
#define C2C_HEADER_MAX 2
struct c2c_header {
struct {
const char *text;
int span;
} line[C2C_HEADER_MAX];
};
struct c2c_dimension {
struct c2c_header header;
const char *name;
int width;
struct sort_entry *se;
int64_t (*cmp)(struct perf_hpp_fmt *fmt,
struct hist_entry *, struct hist_entry *);
int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he);
int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he);
};
struct c2c_fmt {
struct perf_hpp_fmt fmt;
struct c2c_dimension *dim;
};
#define SYMBOL_WIDTH 30
static struct c2c_dimension dim_symbol;
static struct c2c_dimension dim_srcline;
static int symbol_width(struct hists *hists, struct sort_entry *se)
{
int width = hists__col_len(hists, se->se_width_idx);
if (!c2c.symbol_full)
width = MIN(width, SYMBOL_WIDTH);
return width;
}
static int c2c_width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct hists *hists)
{
struct c2c_fmt *c2c_fmt;
struct c2c_dimension *dim;
c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
dim = c2c_fmt->dim;
if (dim == &dim_symbol || dim == &dim_srcline)
return symbol_width(hists, dim->se);
return dim->se ? hists__col_len(hists, dim->se->se_width_idx) :
c2c_fmt->dim->width;
}
static int c2c_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hists *hists, int line, int *span)
{
struct perf_hpp_list *hpp_list = hists->hpp_list;
struct c2c_fmt *c2c_fmt;
struct c2c_dimension *dim;
const char *text = NULL;
int width = c2c_width(fmt, hpp, hists);
c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
dim = c2c_fmt->dim;
if (dim->se) {
text = dim->header.line[line].text;
/* Use the last line from sort_entry if not defined. */
if (!text && (line == hpp_list->nr_header_lines - 1))
text = dim->se->se_header;
} else {
text = dim->header.line[line].text;
if (*span) {
(*span)--;
return 0;
} else {
*span = dim->header.line[line].span;
}
}
if (text == NULL)
text = "";
return scnprintf(hpp->buf, hpp->size, "%*s", width, text);
}
#define HEX_STR(__s, __v) \
({ \
scnprintf(__s, sizeof(__s), "0x%" PRIx64, __v); \
__s; \
})
static int64_t
dcacheline_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return sort__dcacheline_cmp(left, right);
}
static int dcacheline_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
uint64_t addr = 0;
int width = c2c_width(fmt, hpp, he->hists);
char buf[20];
if (he->mem_info)
addr = cl_address(he->mem_info->daddr.addr, chk_double_cl);
return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
}
static int
dcacheline_node_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
c2c_he = container_of(he, struct c2c_hist_entry, he);
if (WARN_ON_ONCE(!c2c_he->nodestr))
return 0;
return scnprintf(hpp->buf, hpp->size, "%*s", width, c2c_he->nodestr);
}
static int
dcacheline_node_count(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
c2c_he = container_of(he, struct c2c_hist_entry, he);
return scnprintf(hpp->buf, hpp->size, "%*lu", width, c2c_he->paddr_cnt);
}
static int offset_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
uint64_t addr = 0;
int width = c2c_width(fmt, hpp, he->hists);
char buf[20];
if (he->mem_info)
addr = cl_offset(he->mem_info->daddr.al_addr, chk_double_cl);
return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
}
static int64_t
offset_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
uint64_t l = 0, r = 0;
if (left->mem_info)
l = cl_offset(left->mem_info->daddr.addr, chk_double_cl);
if (right->mem_info)
r = cl_offset(right->mem_info->daddr.addr, chk_double_cl);
return (int64_t)(r - l);
}
static int
iaddr_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
uint64_t addr = 0;
int width = c2c_width(fmt, hpp, he->hists);
char buf[20];
if (he->mem_info)
addr = he->mem_info->iaddr.addr;
return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
}
static int64_t
iaddr_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return sort__iaddr_cmp(left, right);
}
static int
tot_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
unsigned int tot_hitm;
c2c_he = container_of(he, struct c2c_hist_entry, he);
tot_hitm = c2c_he->stats.lcl_hitm + c2c_he->stats.rmt_hitm;
return scnprintf(hpp->buf, hpp->size, "%*u", width, tot_hitm);
}
static int64_t
tot_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
struct c2c_hist_entry *c2c_left;
struct c2c_hist_entry *c2c_right;
uint64_t tot_hitm_left;
uint64_t tot_hitm_right;
c2c_left = container_of(left, struct c2c_hist_entry, he);
c2c_right = container_of(right, struct c2c_hist_entry, he);
tot_hitm_left = c2c_left->stats.lcl_hitm + c2c_left->stats.rmt_hitm;
tot_hitm_right = c2c_right->stats.lcl_hitm + c2c_right->stats.rmt_hitm;
return tot_hitm_left - tot_hitm_right;
}
#define STAT_FN_ENTRY(__f) \
static int \
__f ## _entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
struct c2c_hist_entry *c2c_he; \
int width = c2c_width(fmt, hpp, he->hists); \
\
c2c_he = container_of(he, struct c2c_hist_entry, he); \
return scnprintf(hpp->buf, hpp->size, "%*u", width, \
c2c_he->stats.__f); \
}
#define STAT_FN_CMP(__f) \
static int64_t \
__f ## _cmp(struct perf_hpp_fmt *fmt __maybe_unused, \
struct hist_entry *left, struct hist_entry *right) \
{ \
struct c2c_hist_entry *c2c_left, *c2c_right; \
\
c2c_left = container_of(left, struct c2c_hist_entry, he); \
c2c_right = container_of(right, struct c2c_hist_entry, he); \
return (uint64_t) c2c_left->stats.__f - \
(uint64_t) c2c_right->stats.__f; \
}
#define STAT_FN(__f) \
STAT_FN_ENTRY(__f) \
STAT_FN_CMP(__f)
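/*
 * Each STAT_FN(field) line below expands to a <field>_entry() printer
 * and a <field>_cmp() comparator for the corresponding c2c_stats field.
 */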
STAT_FN(rmt_hitm)
STAT_FN(lcl_hitm)
STAT_FN(rmt_peer)
STAT_FN(lcl_peer)
STAT_FN(tot_peer)
STAT_FN(store)
STAT_FN(st_l1hit)
STAT_FN(st_l1miss)
STAT_FN(st_na)
STAT_FN(ld_fbhit)
STAT_FN(ld_l1hit)
STAT_FN(ld_l2hit)
STAT_FN(ld_llchit)
STAT_FN(rmt_hit)
static uint64_t get_load_llc_misses(struct c2c_stats *stats)
{
return stats->lcl_dram +
stats->rmt_dram +
stats->rmt_hitm +
stats->rmt_hit;
}
static uint64_t get_load_cache_hits(struct c2c_stats *stats)
{
return stats->ld_fbhit +
stats->ld_l1hit +
stats->ld_l2hit +
stats->ld_llchit +
stats->lcl_hitm;
}
static uint64_t get_stores(struct c2c_stats *stats)
{
return stats->st_l1hit +
stats->st_l1miss +
stats->st_na;
}
static uint64_t total_records(struct c2c_stats *stats)
{
return get_load_llc_misses(stats) +
get_load_cache_hits(stats) +
get_stores(stats);
}
static int
tot_recs_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
uint64_t tot_recs;
c2c_he = container_of(he, struct c2c_hist_entry, he);
tot_recs = total_records(&c2c_he->stats);
return scnprintf(hpp->buf, hpp->size, "%*" PRIu64, width, tot_recs);
}
static int64_t
tot_recs_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
struct c2c_hist_entry *c2c_left;
struct c2c_hist_entry *c2c_right;
uint64_t tot_recs_left;
uint64_t tot_recs_right;
c2c_left = container_of(left, struct c2c_hist_entry, he);
c2c_right = container_of(right, struct c2c_hist_entry, he);
tot_recs_left = total_records(&c2c_left->stats);
tot_recs_right = total_records(&c2c_right->stats);
return tot_recs_left - tot_recs_right;
}
static uint64_t total_loads(struct c2c_stats *stats)
{
return get_load_llc_misses(stats) +
get_load_cache_hits(stats);
}
static int
tot_loads_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
uint64_t tot_recs;
c2c_he = container_of(he, struct c2c_hist_entry, he);
tot_recs = total_loads(&c2c_he->stats);
return scnprintf(hpp->buf, hpp->size, "%*" PRIu64, width, tot_recs);
}
static int64_t
tot_loads_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
struct c2c_hist_entry *c2c_left;
struct c2c_hist_entry *c2c_right;
uint64_t tot_recs_left;
uint64_t tot_recs_right;
c2c_left = container_of(left, struct c2c_hist_entry, he);
c2c_right = container_of(right, struct c2c_hist_entry, he);
tot_recs_left = total_loads(&c2c_left->stats);
tot_recs_right = total_loads(&c2c_right->stats);
return tot_recs_left - tot_recs_right;
}
typedef double (get_percent_cb)(struct c2c_hist_entry *);
static int
percent_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he, get_percent_cb get_percent)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
double per;
c2c_he = container_of(he, struct c2c_hist_entry, he);
per = get_percent(c2c_he);
#ifdef HAVE_SLANG_SUPPORT
if (use_browser)
return __hpp__slsmg_color_printf(hpp, "%*.2f%%", width - 1, per);
#endif
return hpp_color_scnprintf(hpp, "%*.2f%%", width - 1, per);
}
static double percent_costly_snoop(struct c2c_hist_entry *c2c_he)
{
struct c2c_hists *hists;
struct c2c_stats *stats;
struct c2c_stats *total;
int tot = 0, st = 0;
double p;
hists = container_of(c2c_he->he.hists, struct c2c_hists, hists);
stats = &c2c_he->stats;
total = &hists->stats;
switch (c2c.display) {
case DISPLAY_RMT_HITM:
st = stats->rmt_hitm;
tot = total->rmt_hitm;
break;
case DISPLAY_LCL_HITM:
st = stats->lcl_hitm;
tot = total->lcl_hitm;
break;
case DISPLAY_TOT_HITM:
st = stats->tot_hitm;
tot = total->tot_hitm;
break;
case DISPLAY_SNP_PEER:
st = stats->tot_peer;
tot = total->tot_peer;
break;
default:
break;
}
p = tot ? (double) st / tot : 0;
return 100 * p;
}
#define PERC_STR(__s, __v) \
({ \
scnprintf(__s, sizeof(__s), "%.2F%%", __v); \
__s; \
})
static int
percent_costly_snoop_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
char buf[10];
double per;
c2c_he = container_of(he, struct c2c_hist_entry, he);
per = percent_costly_snoop(c2c_he);
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int
percent_costly_snoop_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
return percent_color(fmt, hpp, he, percent_costly_snoop);
}
static int64_t
percent_costly_snoop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
struct c2c_hist_entry *c2c_left;
struct c2c_hist_entry *c2c_right;
double per_left;
double per_right;
c2c_left = container_of(left, struct c2c_hist_entry, he);
c2c_right = container_of(right, struct c2c_hist_entry, he);
per_left = percent_costly_snoop(c2c_left);
per_right = percent_costly_snoop(c2c_right);
return per_left - per_right;
}
static struct c2c_stats *he_stats(struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
c2c_he = container_of(he, struct c2c_hist_entry, he);
return &c2c_he->stats;
}
static struct c2c_stats *total_stats(struct hist_entry *he)
{
struct c2c_hists *hists;
hists = container_of(he->hists, struct c2c_hists, hists);
return &hists->stats;
}
static double percent(u32 st, u32 tot)
{
return tot ? 100. * (double) st / (double) tot : 0;
}
#define PERCENT(__h, __f) percent(he_stats(__h)->__f, total_stats(__h)->__f)
#define PERCENT_FN(__f) \
static double percent_ ## __f(struct c2c_hist_entry *c2c_he) \
{ \
struct c2c_hists *hists; \
\
hists = container_of(c2c_he->he.hists, struct c2c_hists, hists); \
return percent(c2c_he->stats.__f, hists->stats.__f); \
}
PERCENT_FN(rmt_hitm)
PERCENT_FN(lcl_hitm)
PERCENT_FN(rmt_peer)
PERCENT_FN(lcl_peer)
PERCENT_FN(st_l1hit)
PERCENT_FN(st_l1miss)
PERCENT_FN(st_na)
static int
percent_rmt_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = c2c_width(fmt, hpp, he->hists);
double per = PERCENT(he, rmt_hitm);
char buf[10];
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int
percent_rmt_hitm_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
return percent_color(fmt, hpp, he, percent_rmt_hitm);
}
static int64_t
percent_rmt_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
double per_left;
double per_right;
per_left = PERCENT(left, rmt_hitm);
per_right = PERCENT(right, rmt_hitm);
return per_left - per_right;
}
static int
percent_lcl_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = c2c_width(fmt, hpp, he->hists);
double per = PERCENT(he, lcl_hitm);
char buf[10];
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int
percent_lcl_hitm_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
return percent_color(fmt, hpp, he, percent_lcl_hitm);
}
static int64_t
percent_lcl_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
double per_left;
double per_right;
per_left = PERCENT(left, lcl_hitm);
per_right = PERCENT(right, lcl_hitm);
return per_left - per_right;
}
static int
percent_lcl_peer_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = c2c_width(fmt, hpp, he->hists);
double per = PERCENT(he, lcl_peer);
char buf[10];
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int
percent_lcl_peer_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
return percent_color(fmt, hpp, he, percent_lcl_peer);
}
static int64_t
percent_lcl_peer_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
double per_left;
double per_right;
per_left = PERCENT(left, lcl_peer);
per_right = PERCENT(right, lcl_peer);
return per_left - per_right;
}
static int
percent_rmt_peer_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = c2c_width(fmt, hpp, he->hists);
double per = PERCENT(he, rmt_peer);
char buf[10];
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int
percent_rmt_peer_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
return percent_color(fmt, hpp, he, percent_rmt_peer);
}
static int64_t
percent_rmt_peer_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
double per_left;
double per_right;
per_left = PERCENT(left, rmt_peer);
per_right = PERCENT(right, rmt_peer);
return per_left - per_right;
}
static int
percent_stores_l1hit_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = c2c_width(fmt, hpp, he->hists);
double per = PERCENT(he, st_l1hit);
char buf[10];
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int
percent_stores_l1hit_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
return percent_color(fmt, hpp, he, percent_st_l1hit);
}
static int64_t
percent_stores_l1hit_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
double per_left;
double per_right;
per_left = PERCENT(left, st_l1hit);
per_right = PERCENT(right, st_l1hit);
return per_left - per_right;
}
static int
percent_stores_l1miss_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = c2c_width(fmt, hpp, he->hists);
double per = PERCENT(he, st_l1miss);
char buf[10];
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int
percent_stores_l1miss_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
return percent_color(fmt, hpp, he, percent_st_l1miss);
}
static int64_t
percent_stores_l1miss_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
double per_left;
double per_right;
per_left = PERCENT(left, st_l1miss);
per_right = PERCENT(right, st_l1miss);
return per_left - per_right;
}
static int
percent_stores_na_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = c2c_width(fmt, hpp, he->hists);
double per = PERCENT(he, st_na);
char buf[10];
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int
percent_stores_na_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
return percent_color(fmt, hpp, he, percent_st_na);
}
static int64_t
percent_stores_na_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
double per_left;
double per_right;
per_left = PERCENT(left, st_na);
per_right = PERCENT(right, st_na);
return per_left - per_right;
}
STAT_FN(lcl_dram)
STAT_FN(rmt_dram)
static int
pid_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = c2c_width(fmt, hpp, he->hists);
return scnprintf(hpp->buf, hpp->size, "%*d", width, thread__pid(he->thread));
}
static int64_t
pid_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return thread__pid(left->thread) - thread__pid(right->thread);
}
static int64_t
empty_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left __maybe_unused,
struct hist_entry *right __maybe_unused)
{
return 0;
}
static int display_metrics(struct perf_hpp *hpp, u32 val, u32 sum)
{
int ret;
if (sum != 0)
ret = scnprintf(hpp->buf, hpp->size, "%5.1f%% ",
percent(val, sum));
else
ret = scnprintf(hpp->buf, hpp->size, "%6s ", "n/a");
return ret;
}
static int
node_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
bool first = true;
int node;
int ret = 0;
c2c_he = container_of(he, struct c2c_hist_entry, he);
for (node = 0; node < c2c.nodes_cnt; node++) {
DECLARE_BITMAP(set, c2c.cpus_cnt);
bitmap_zero(set, c2c.cpus_cnt);
bitmap_and(set, c2c_he->cpuset, c2c.nodes[node], c2c.cpus_cnt);
if (bitmap_empty(set, c2c.cpus_cnt)) {
if (c2c.node_info == 1) {
ret = scnprintf(hpp->buf, hpp->size, "%21s", " ");
advance_hpp(hpp, ret);
}
continue;
}
if (!first) {
ret = scnprintf(hpp->buf, hpp->size, " ");
advance_hpp(hpp, ret);
}
switch (c2c.node_info) {
case 0:
ret = scnprintf(hpp->buf, hpp->size, "%2d", node);
advance_hpp(hpp, ret);
break;
case 1:
{
int num = bitmap_weight(set, c2c.cpus_cnt);
struct c2c_stats *stats = &c2c_he->node_stats[node];
ret = scnprintf(hpp->buf, hpp->size, "%2d{%2d ", node, num);
advance_hpp(hpp, ret);
switch (c2c.display) {
case DISPLAY_RMT_HITM:
ret = display_metrics(hpp, stats->rmt_hitm,
c2c_he->stats.rmt_hitm);
break;
case DISPLAY_LCL_HITM:
ret = display_metrics(hpp, stats->lcl_hitm,
c2c_he->stats.lcl_hitm);
break;
case DISPLAY_TOT_HITM:
ret = display_metrics(hpp, stats->tot_hitm,
c2c_he->stats.tot_hitm);
break;
case DISPLAY_SNP_PEER:
ret = display_metrics(hpp, stats->tot_peer,
c2c_he->stats.tot_peer);
break;
default:
break;
}
advance_hpp(hpp, ret);
if (c2c_he->stats.store > 0) {
ret = scnprintf(hpp->buf, hpp->size, "%5.1f%%}",
percent(stats->store, c2c_he->stats.store));
} else {
ret = scnprintf(hpp->buf, hpp->size, "%6s}", "n/a");
}
advance_hpp(hpp, ret);
break;
}
case 2:
ret = scnprintf(hpp->buf, hpp->size, "%2d{", node);
advance_hpp(hpp, ret);
ret = bitmap_scnprintf(set, c2c.cpus_cnt, hpp->buf, hpp->size);
advance_hpp(hpp, ret);
ret = scnprintf(hpp->buf, hpp->size, "}");
advance_hpp(hpp, ret);
break;
default:
break;
}
first = false;
}
return 0;
}
static int
mean_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he, double mean)
{
int width = c2c_width(fmt, hpp, he->hists);
char buf[10];
scnprintf(buf, 10, "%6.0f", mean);
return scnprintf(hpp->buf, hpp->size, "%*s", width, buf);
}
#define MEAN_ENTRY(__func, __val) \
static int \
__func(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he) \
{ \
struct c2c_hist_entry *c2c_he; \
c2c_he = container_of(he, struct c2c_hist_entry, he); \
return mean_entry(fmt, hpp, he, avg_stats(&c2c_he->cstats.__val)); \
}
MEAN_ENTRY(mean_rmt_entry, rmt_hitm);
MEAN_ENTRY(mean_lcl_entry, lcl_hitm);
MEAN_ENTRY(mean_load_entry, load);
MEAN_ENTRY(mean_rmt_peer_entry, rmt_peer);
MEAN_ENTRY(mean_lcl_peer_entry, lcl_peer);
static int
cpucnt_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
char buf[10];
c2c_he = container_of(he, struct c2c_hist_entry, he);
scnprintf(buf, 10, "%d", bitmap_weight(c2c_he->cpuset, c2c.cpus_cnt));
return scnprintf(hpp->buf, hpp->size, "%*s", width, buf);
}
static int
cl_idx_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
char buf[10];
c2c_he = container_of(he, struct c2c_hist_entry, he);
scnprintf(buf, 10, "%u", c2c_he->cacheline_idx);
return scnprintf(hpp->buf, hpp->size, "%*s", width, buf);
}
static int
cl_idx_empty_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = c2c_width(fmt, hpp, he->hists);
return scnprintf(hpp->buf, hpp->size, "%*s", width, "");
}
#define HEADER_LOW(__h) \
{ \
.line[1] = { \
.text = __h, \
}, \
}
#define HEADER_BOTH(__h0, __h1) \
{ \
.line[0] = { \
.text = __h0, \
}, \
.line[1] = { \
.text = __h1, \
}, \
}
#define HEADER_SPAN(__h0, __h1, __s) \
{ \
.line[0] = { \
.text = __h0, \
.span = __s, \
}, \
.line[1] = { \
.text = __h1, \
}, \
}
#define HEADER_SPAN_LOW(__h) \
{ \
.line[1] = { \
.text = __h, \
}, \
}
static struct c2c_dimension dim_dcacheline = {
.header = HEADER_SPAN("--- Cacheline ----", "Address", 2),
.name = "dcacheline",
.cmp = dcacheline_cmp,
.entry = dcacheline_entry,
.width = 18,
};
static struct c2c_dimension dim_dcacheline_node = {
.header = HEADER_LOW("Node"),
.name = "dcacheline_node",
.cmp = empty_cmp,
.entry = dcacheline_node_entry,
.width = 4,
};
static struct c2c_dimension dim_dcacheline_count = {
.header = HEADER_LOW("PA cnt"),
.name = "dcacheline_count",
.cmp = empty_cmp,
.entry = dcacheline_node_count,
.width = 6,
};
static struct c2c_header header_offset_tui = HEADER_SPAN("-----", "Off", 2);
static struct c2c_dimension dim_offset = {
.header = HEADER_SPAN("--- Data address -", "Offset", 2),
.name = "offset",
.cmp = offset_cmp,
.entry = offset_entry,
.width = 18,
};
static struct c2c_dimension dim_offset_node = {
.header = HEADER_LOW("Node"),
.name = "offset_node",
.cmp = empty_cmp,
.entry = dcacheline_node_entry,
.width = 4,
};
static struct c2c_dimension dim_iaddr = {
.header = HEADER_LOW("Code address"),
.name = "iaddr",
.cmp = iaddr_cmp,
.entry = iaddr_entry,
.width = 18,
};
static struct c2c_dimension dim_tot_hitm = {
.header = HEADER_SPAN("------- Load Hitm -------", "Total", 2),
.name = "tot_hitm",
.cmp = tot_hitm_cmp,
.entry = tot_hitm_entry,
.width = 7,
};
static struct c2c_dimension dim_lcl_hitm = {
.header = HEADER_SPAN_LOW("LclHitm"),
.name = "lcl_hitm",
.cmp = lcl_hitm_cmp,
.entry = lcl_hitm_entry,
.width = 7,
};
static struct c2c_dimension dim_rmt_hitm = {
.header = HEADER_SPAN_LOW("RmtHitm"),
.name = "rmt_hitm",
.cmp = rmt_hitm_cmp,
.entry = rmt_hitm_entry,
.width = 7,
};
static struct c2c_dimension dim_tot_peer = {
.header = HEADER_SPAN("------- Load Peer -------", "Total", 2),
.name = "tot_peer",
.cmp = tot_peer_cmp,
.entry = tot_peer_entry,
.width = 7,
};
static struct c2c_dimension dim_lcl_peer = {
.header = HEADER_SPAN_LOW("Local"),
.name = "lcl_peer",
.cmp = lcl_peer_cmp,
.entry = lcl_peer_entry,
.width = 7,
};
static struct c2c_dimension dim_rmt_peer = {
.header = HEADER_SPAN_LOW("Remote"),
.name = "rmt_peer",
.cmp = rmt_peer_cmp,
.entry = rmt_peer_entry,
.width = 7,
};
static struct c2c_dimension dim_cl_rmt_hitm = {
.header = HEADER_SPAN("----- HITM -----", "Rmt", 1),
.name = "cl_rmt_hitm",
.cmp = rmt_hitm_cmp,
.entry = rmt_hitm_entry,
.width = 7,
};
static struct c2c_dimension dim_cl_lcl_hitm = {
.header = HEADER_SPAN_LOW("Lcl"),
.name = "cl_lcl_hitm",
.cmp = lcl_hitm_cmp,
.entry = lcl_hitm_entry,
.width = 7,
};
static struct c2c_dimension dim_cl_rmt_peer = {
.header = HEADER_SPAN("----- Peer -----", "Rmt", 1),
.name = "cl_rmt_peer",
.cmp = rmt_peer_cmp,
.entry = rmt_peer_entry,
.width = 7,
};
static struct c2c_dimension dim_cl_lcl_peer = {
.header = HEADER_SPAN_LOW("Lcl"),
.name = "cl_lcl_peer",
.cmp = lcl_peer_cmp,
.entry = lcl_peer_entry,
.width = 7,
};
static struct c2c_dimension dim_tot_stores = {
.header = HEADER_BOTH("Total", "Stores"),
.name = "tot_stores",
.cmp = store_cmp,
.entry = store_entry,
.width = 7,
};
static struct c2c_dimension dim_stores_l1hit = {
.header = HEADER_SPAN("--------- Stores --------", "L1Hit", 2),
.name = "stores_l1hit",
.cmp = st_l1hit_cmp,
.entry = st_l1hit_entry,
.width = 7,
};
static struct c2c_dimension dim_stores_l1miss = {
.header = HEADER_SPAN_LOW("L1Miss"),
.name = "stores_l1miss",
.cmp = st_l1miss_cmp,
.entry = st_l1miss_entry,
.width = 7,
};
static struct c2c_dimension dim_stores_na = {
.header = HEADER_SPAN_LOW("N/A"),
.name = "stores_na",
.cmp = st_na_cmp,
.entry = st_na_entry,
.width = 7,
};
static struct c2c_dimension dim_cl_stores_l1hit = {
.header = HEADER_SPAN("------- Store Refs ------", "L1 Hit", 2),
.name = "cl_stores_l1hit",
.cmp = st_l1hit_cmp,
.entry = st_l1hit_entry,
.width = 7,
};
static struct c2c_dimension dim_cl_stores_l1miss = {
.header = HEADER_SPAN_LOW("L1 Miss"),
.name = "cl_stores_l1miss",
.cmp = st_l1miss_cmp,
.entry = st_l1miss_entry,
.width = 7,
};
static struct c2c_dimension dim_cl_stores_na = {
.header = HEADER_SPAN_LOW("N/A"),
.name = "cl_stores_na",
.cmp = st_na_cmp,
.entry = st_na_entry,
.width = 7,
};
static struct c2c_dimension dim_ld_fbhit = {
.header = HEADER_SPAN("----- Core Load Hit -----", "FB", 2),
.name = "ld_fbhit",
.cmp = ld_fbhit_cmp,
.entry = ld_fbhit_entry,
.width = 7,
};
static struct c2c_dimension dim_ld_l1hit = {
.header = HEADER_SPAN_LOW("L1"),
.name = "ld_l1hit",
.cmp = ld_l1hit_cmp,
.entry = ld_l1hit_entry,
.width = 7,
};
static struct c2c_dimension dim_ld_l2hit = {
.header = HEADER_SPAN_LOW("L2"),
.name = "ld_l2hit",
.cmp = ld_l2hit_cmp,
.entry = ld_l2hit_entry,
.width = 7,
};
static struct c2c_dimension dim_ld_llchit = {
.header = HEADER_SPAN("- LLC Load Hit --", "LclHit", 1),
.name = "ld_lclhit",
.cmp = ld_llchit_cmp,
.entry = ld_llchit_entry,
.width = 8,
};
static struct c2c_dimension dim_ld_rmthit = {
.header = HEADER_SPAN("- RMT Load Hit --", "RmtHit", 1),
.name = "ld_rmthit",
.cmp = rmt_hit_cmp,
.entry = rmt_hit_entry,
.width = 8,
};
static struct c2c_dimension dim_tot_recs = {
.header = HEADER_BOTH("Total", "records"),
.name = "tot_recs",
.cmp = tot_recs_cmp,
.entry = tot_recs_entry,
.width = 7,
};
static struct c2c_dimension dim_tot_loads = {
.header = HEADER_BOTH("Total", "Loads"),
.name = "tot_loads",
.cmp = tot_loads_cmp,
.entry = tot_loads_entry,
.width = 7,
};
static struct c2c_header percent_costly_snoop_header[] = {
[DISPLAY_LCL_HITM] = HEADER_BOTH("Lcl", "Hitm"),
[DISPLAY_RMT_HITM] = HEADER_BOTH("Rmt", "Hitm"),
[DISPLAY_TOT_HITM] = HEADER_BOTH("Tot", "Hitm"),
[DISPLAY_SNP_PEER] = HEADER_BOTH("Peer", "Snoop"),
};
static struct c2c_dimension dim_percent_costly_snoop = {
.name = "percent_costly_snoop",
.cmp = percent_costly_snoop_cmp,
.entry = percent_costly_snoop_entry,
.color = percent_costly_snoop_color,
.width = 7,
};
static struct c2c_dimension dim_percent_rmt_hitm = {
.header = HEADER_SPAN("----- HITM -----", "RmtHitm", 1),
.name = "percent_rmt_hitm",
.cmp = percent_rmt_hitm_cmp,
.entry = percent_rmt_hitm_entry,
.color = percent_rmt_hitm_color,
.width = 7,
};
static struct c2c_dimension dim_percent_lcl_hitm = {
.header = HEADER_SPAN_LOW("LclHitm"),
.name = "percent_lcl_hitm",
.cmp = percent_lcl_hitm_cmp,
.entry = percent_lcl_hitm_entry,
.color = percent_lcl_hitm_color,
.width = 7,
};
static struct c2c_dimension dim_percent_rmt_peer = {
.header = HEADER_SPAN("-- Peer Snoop --", "Rmt", 1),
.name = "percent_rmt_peer",
.cmp = percent_rmt_peer_cmp,
.entry = percent_rmt_peer_entry,
.color = percent_rmt_peer_color,
.width = 7,
};
static struct c2c_dimension dim_percent_lcl_peer = {
.header = HEADER_SPAN_LOW("Lcl"),
.name = "percent_lcl_peer",
.cmp = percent_lcl_peer_cmp,
.entry = percent_lcl_peer_entry,
.color = percent_lcl_peer_color,
.width = 7,
};
static struct c2c_dimension dim_percent_stores_l1hit = {
.header = HEADER_SPAN("------- Store Refs ------", "L1 Hit", 2),
.name = "percent_stores_l1hit",
.cmp = percent_stores_l1hit_cmp,
.entry = percent_stores_l1hit_entry,
.color = percent_stores_l1hit_color,
.width = 7,
};
static struct c2c_dimension dim_percent_stores_l1miss = {
.header = HEADER_SPAN_LOW("L1 Miss"),
.name = "percent_stores_l1miss",
.cmp = percent_stores_l1miss_cmp,
.entry = percent_stores_l1miss_entry,
.color = percent_stores_l1miss_color,
.width = 7,
};
static struct c2c_dimension dim_percent_stores_na = {
.header = HEADER_SPAN_LOW("N/A"),
.name = "percent_stores_na",
.cmp = percent_stores_na_cmp,
.entry = percent_stores_na_entry,
.color = percent_stores_na_color,
.width = 7,
};
static struct c2c_dimension dim_dram_lcl = {
.header = HEADER_SPAN("--- Load Dram ----", "Lcl", 1),
.name = "dram_lcl",
.cmp = lcl_dram_cmp,
.entry = lcl_dram_entry,
.width = 8,
};
static struct c2c_dimension dim_dram_rmt = {
.header = HEADER_SPAN_LOW("Rmt"),
.name = "dram_rmt",
.cmp = rmt_dram_cmp,
.entry = rmt_dram_entry,
.width = 8,
};
static struct c2c_dimension dim_pid = {
.header = HEADER_LOW("Pid"),
.name = "pid",
.cmp = pid_cmp,
.entry = pid_entry,
.width = 7,
};
static struct c2c_dimension dim_tid = {
.header = HEADER_LOW("Tid"),
.name = "tid",
.se = &sort_thread,
};
static struct c2c_dimension dim_symbol = {
.name = "symbol",
.se = &sort_sym,
};
static struct c2c_dimension dim_dso = {
.header = HEADER_BOTH("Shared", "Object"),
.name = "dso",
.se = &sort_dso,
};
static struct c2c_dimension dim_node = {
.name = "node",
.cmp = empty_cmp,
.entry = node_entry,
.width = 4,
};
static struct c2c_dimension dim_mean_rmt = {
.header = HEADER_SPAN("---------- cycles ----------", "rmt hitm", 2),
.name = "mean_rmt",
.cmp = empty_cmp,
.entry = mean_rmt_entry,
.width = 8,
};
static struct c2c_dimension dim_mean_lcl = {
.header = HEADER_SPAN_LOW("lcl hitm"),
.name = "mean_lcl",
.cmp = empty_cmp,
.entry = mean_lcl_entry,
.width = 8,
};
static struct c2c_dimension dim_mean_load = {
.header = HEADER_SPAN_LOW("load"),
.name = "mean_load",
.cmp = empty_cmp,
.entry = mean_load_entry,
.width = 8,
};
static struct c2c_dimension dim_mean_rmt_peer = {
.header = HEADER_SPAN("---------- cycles ----------", "rmt peer", 2),
.name = "mean_rmt_peer",
.cmp = empty_cmp,
.entry = mean_rmt_peer_entry,
.width = 8,
};
static struct c2c_dimension dim_mean_lcl_peer = {
.header = HEADER_SPAN_LOW("lcl peer"),
.name = "mean_lcl_peer",
.cmp = empty_cmp,
.entry = mean_lcl_peer_entry,
.width = 8,
};
static struct c2c_dimension dim_cpucnt = {
.header = HEADER_BOTH("cpu", "cnt"),
.name = "cpucnt",
.cmp = empty_cmp,
.entry = cpucnt_entry,
.width = 8,
};
static struct c2c_dimension dim_srcline = {
.name = "cl_srcline",
.se = &sort_srcline,
};
static struct c2c_dimension dim_dcacheline_idx = {
.header = HEADER_LOW("Index"),
.name = "cl_idx",
.cmp = empty_cmp,
.entry = cl_idx_entry,
.width = 5,
};
static struct c2c_dimension dim_dcacheline_num = {
.header = HEADER_LOW("Num"),
.name = "cl_num",
.cmp = empty_cmp,
.entry = cl_idx_entry,
.width = 5,
};
static struct c2c_dimension dim_dcacheline_num_empty = {
.header = HEADER_LOW("Num"),
.name = "cl_num_empty",
.cmp = empty_cmp,
.entry = cl_idx_empty_entry,
.width = 5,
};
static struct c2c_dimension *dimensions[] = {
&dim_dcacheline,
&dim_dcacheline_node,
&dim_dcacheline_count,
&dim_offset,
&dim_offset_node,
&dim_iaddr,
&dim_tot_hitm,
&dim_lcl_hitm,
&dim_rmt_hitm,
&dim_tot_peer,
&dim_lcl_peer,
&dim_rmt_peer,
&dim_cl_lcl_hitm,
&dim_cl_rmt_hitm,
&dim_cl_lcl_peer,
&dim_cl_rmt_peer,
&dim_tot_stores,
&dim_stores_l1hit,
&dim_stores_l1miss,
&dim_stores_na,
&dim_cl_stores_l1hit,
&dim_cl_stores_l1miss,
&dim_cl_stores_na,
&dim_ld_fbhit,
&dim_ld_l1hit,
&dim_ld_l2hit,
&dim_ld_llchit,
&dim_ld_rmthit,
&dim_tot_recs,
&dim_tot_loads,
&dim_percent_costly_snoop,
&dim_percent_rmt_hitm,
&dim_percent_lcl_hitm,
&dim_percent_rmt_peer,
&dim_percent_lcl_peer,
&dim_percent_stores_l1hit,
&dim_percent_stores_l1miss,
&dim_percent_stores_na,
&dim_dram_lcl,
&dim_dram_rmt,
&dim_pid,
&dim_tid,
&dim_symbol,
&dim_dso,
&dim_node,
&dim_mean_rmt,
&dim_mean_lcl,
&dim_mean_rmt_peer,
&dim_mean_lcl_peer,
&dim_mean_load,
&dim_cpucnt,
&dim_srcline,
&dim_dcacheline_idx,
&dim_dcacheline_num,
&dim_dcacheline_num_empty,
NULL,
};
static void fmt_free(struct perf_hpp_fmt *fmt)
{
struct c2c_fmt *c2c_fmt;
c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
free(c2c_fmt);
}
static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
struct c2c_fmt *c2c_a = container_of(a, struct c2c_fmt, fmt);
struct c2c_fmt *c2c_b = container_of(b, struct c2c_fmt, fmt);
return c2c_a->dim == c2c_b->dim;
}
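/* Look up a dimension by name in the NULL-terminated dimensions table. */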
static struct c2c_dimension *get_dimension(const char *name)
{
unsigned int i;
for (i = 0; dimensions[i]; i++) {
struct c2c_dimension *dim = dimensions[i];
if (!strcmp(dim->name, name))
return dim;
}
return NULL;
}
static int c2c_se_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct c2c_fmt *c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
struct c2c_dimension *dim = c2c_fmt->dim;
size_t len = fmt->user_len;
if (!len) {
len = hists__col_len(he->hists, dim->se->se_width_idx);
if (dim == &dim_symbol || dim == &dim_srcline)
len = symbol_width(he->hists, dim->se);
}
return dim->se->se_snprintf(he, hpp->buf, hpp->size, len);
}
static int64_t c2c_se_cmp(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct c2c_fmt *c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
struct c2c_dimension *dim = c2c_fmt->dim;
return dim->se->se_cmp(a, b);
}
static int64_t c2c_se_collapse(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct c2c_fmt *c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
struct c2c_dimension *dim = c2c_fmt->dim;
int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
collapse_fn = dim->se->se_collapse ?: dim->se->se_cmp;
return collapse_fn(a, b);
}
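/*
 * Allocate a c2c_fmt for the named dimension and wire up the generic
 * perf_hpp_fmt callbacks: dimensions backed by a sort_entry (dim->se)
 * go through the c2c_se_* wrappers, the rest use their own callbacks.
 */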
static struct c2c_fmt *get_format(const char *name)
{
struct c2c_dimension *dim = get_dimension(name);
struct c2c_fmt *c2c_fmt;
struct perf_hpp_fmt *fmt;
if (!dim)
return NULL;
c2c_fmt = zalloc(sizeof(*c2c_fmt));
if (!c2c_fmt)
return NULL;
c2c_fmt->dim = dim;
fmt = &c2c_fmt->fmt;
INIT_LIST_HEAD(&fmt->list);
INIT_LIST_HEAD(&fmt->sort_list);
fmt->cmp = dim->se ? c2c_se_cmp : dim->cmp;
fmt->sort = dim->se ? c2c_se_cmp : dim->cmp;
fmt->color = dim->se ? NULL : dim->color;
fmt->entry = dim->se ? c2c_se_entry : dim->entry;
fmt->header = c2c_header;
fmt->width = c2c_width;
fmt->collapse = dim->se ? c2c_se_collapse : dim->cmp;
fmt->equal = fmt_equal;
fmt->free = fmt_free;
return c2c_fmt;
}
static int c2c_hists__init_output(struct perf_hpp_list *hpp_list, char *name)
{
struct c2c_fmt *c2c_fmt = get_format(name);
if (!c2c_fmt) {
reset_dimensions();
return output_field_add(hpp_list, name);
}
perf_hpp_list__column_register(hpp_list, &c2c_fmt->fmt);
return 0;
}
static int c2c_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
{
struct c2c_fmt *c2c_fmt = get_format(name);
struct c2c_dimension *dim;
if (!c2c_fmt) {
reset_dimensions();
return sort_dimension__add(hpp_list, name, NULL, 0);
}
dim = c2c_fmt->dim;
if (dim == &dim_dso)
hpp_list->dso = 1;
perf_hpp_list__register_sort_field(hpp_list, &c2c_fmt->fmt);
return 0;
}
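/*
 * Split a comma/space separated field list and feed each token to _fn,
 * stopping on an invalid (-EINVAL) or unknown (-ESRCH) key.
 */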
#define PARSE_LIST(_list, _fn) \
do { \
char *tmp, *tok; \
ret = 0; \
\
if (!_list) \
break; \
\
for (tok = strtok_r((char *)_list, ", ", &tmp); \
tok; tok = strtok_r(NULL, ", ", &tmp)) { \
ret = _fn(hpp_list, tok); \
if (ret == -EINVAL) { \
pr_err("Invalid --fields key: `%s'", tok); \
break; \
} else if (ret == -ESRCH) { \
pr_err("Unknown --fields key: `%s'", tok); \
break; \
} \
} \
} while (0)
static int hpp_list__parse(struct perf_hpp_list *hpp_list,
const char *output_,
const char *sort_)
{
char *output = output_ ? strdup(output_) : NULL;
char *sort = sort_ ? strdup(sort_) : NULL;
int ret;
PARSE_LIST(output, c2c_hists__init_output);
PARSE_LIST(sort, c2c_hists__init_sort);
/* copy sort keys to output fields */
perf_hpp__setup_output_field(hpp_list);
	/*
	 * We don't need any sorting keys other than those we
	 * already specified. Appending them also really slows
	 * down the processing a lot with a big number of output
	 * fields, so switch this off for c2c.
	 */
#if 0
/* and then copy output fields to sort keys */
perf_hpp__append_sort_keys(&hists->list);
#endif
free(output);
free(sort);
return ret;
}
static int c2c_hists__init(struct c2c_hists *hists,
const char *sort,
int nr_header_lines)
{
__hists__init(&hists->hists, &hists->list);
	/*
	 * Initialize only with sort fields; we need to re-sort
	 * later anyway, and that's where we add the output fields
	 * as well.
	 */
perf_hpp_list__init(&hists->list);
	/* Overload the number of header lines. */
hists->list.nr_header_lines = nr_header_lines;
return hpp_list__parse(&hists->list, NULL, sort);
}
static int c2c_hists__reinit(struct c2c_hists *c2c_hists,
const char *output,
const char *sort)
{
perf_hpp__reset_output_field(&c2c_hists->list);
return hpp_list__parse(&c2c_hists->list, output, sort);
}
#define DISPLAY_LINE_LIMIT 0.001
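/*
 * Filter out (HIST_FILTER__C2C) entries whose value contributes less
 * than 0.1% (DISPLAY_LINE_LIMIT) of the sum, or when the sum is zero.
 */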
static u8 filter_display(u32 val, u32 sum)
{
if (sum == 0 || ((double)val / sum) < DISPLAY_LINE_LIMIT)
return HIST_FILTER__C2C;
return 0;
}
static bool he__display(struct hist_entry *he, struct c2c_stats *stats)
{
struct c2c_hist_entry *c2c_he;
if (c2c.show_all)
return true;
c2c_he = container_of(he, struct c2c_hist_entry, he);
switch (c2c.display) {
case DISPLAY_LCL_HITM:
he->filtered = filter_display(c2c_he->stats.lcl_hitm,
stats->lcl_hitm);
break;
case DISPLAY_RMT_HITM:
he->filtered = filter_display(c2c_he->stats.rmt_hitm,
stats->rmt_hitm);
break;
case DISPLAY_TOT_HITM:
he->filtered = filter_display(c2c_he->stats.tot_hitm,
stats->tot_hitm);
break;
case DISPLAY_SNP_PEER:
he->filtered = filter_display(c2c_he->stats.tot_peer,
stats->tot_peer);
break;
default:
break;
}
return he->filtered == 0;
}
static inline bool is_valid_hist_entry(struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
bool has_record = false;
c2c_he = container_of(he, struct c2c_hist_entry, he);
	/* It's a valid entry if it contains stores */
if (c2c_he->stats.store)
return true;
switch (c2c.display) {
case DISPLAY_LCL_HITM:
has_record = !!c2c_he->stats.lcl_hitm;
break;
case DISPLAY_RMT_HITM:
has_record = !!c2c_he->stats.rmt_hitm;
break;
case DISPLAY_TOT_HITM:
has_record = !!c2c_he->stats.tot_hitm;
break;
	case DISPLAY_SNP_PEER:
		has_record = !!c2c_he->stats.tot_peer;
		break;
default:
break;
}
return has_record;
}
static void set_node_width(struct c2c_hist_entry *c2c_he, int len)
{
struct c2c_dimension *dim;
dim = &c2c.hists == c2c_he->hists ?
&dim_dcacheline_node : &dim_offset_node;
if (len > dim->width)
dim->width = len;
}
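/*
 * Format the entry's node bitmap into a string ("N/A" when empty) and
 * update the width of the corresponding node column.
 */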
static int set_nodestr(struct c2c_hist_entry *c2c_he)
{
char buf[30];
int len;
if (c2c_he->nodestr)
return 0;
if (!bitmap_empty(c2c_he->nodeset, c2c.nodes_cnt)) {
len = bitmap_scnprintf(c2c_he->nodeset, c2c.nodes_cnt,
buf, sizeof(buf));
} else {
len = scnprintf(buf, sizeof(buf), "N/A");
}
set_node_width(c2c_he, len);
c2c_he->nodestr = strdup(buf);
return c2c_he->nodestr ? 0 : -ENOMEM;
}
static void calc_width(struct c2c_hist_entry *c2c_he)
{
struct c2c_hists *c2c_hists;
c2c_hists = container_of(c2c_he->he.hists, struct c2c_hists, hists);
hists__calc_col_len(&c2c_hists->hists, &c2c_he->he);
set_nodestr(c2c_he);
}
static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
c2c_he = container_of(he, struct c2c_hist_entry, he);
if (c2c.show_src && !he->srcline)
he->srcline = hist_entry__srcline(he);
calc_width(c2c_he);
if (!is_valid_hist_entry(he))
he->filtered = HIST_FILTER__C2C;
return 0;
}
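/*
 * For each cacheline entry that passes the display filter, assign a
 * cacheline index and re-sort its per-offset hists with the cacheline
 * output/sort fields.
 */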
static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
struct c2c_hists *c2c_hists;
bool display = he__display(he, &c2c.shared_clines_stats);
c2c_he = container_of(he, struct c2c_hist_entry, he);
c2c_hists = c2c_he->hists;
if (display && c2c_hists) {
static unsigned int idx;
c2c_he->cacheline_idx = idx++;
calc_width(c2c_he);
c2c_hists__reinit(c2c_hists, c2c.cl_output, c2c.cl_resort);
hists__collapse_resort(&c2c_hists->hists, NULL);
hists__output_resort_cb(&c2c_hists->hists, NULL, filter_cb);
}
return 0;
}
static struct c2c_header header_node_0 = HEADER_LOW("Node");
static struct c2c_header header_node_1_hitms_stores =
HEADER_LOW("Node{cpus %hitms %stores}");
static struct c2c_header header_node_1_peers_stores =
HEADER_LOW("Node{cpus %peers %stores}");
static struct c2c_header header_node_2 = HEADER_LOW("Node{cpu list}");
static void setup_nodes_header(void)
{
switch (c2c.node_info) {
case 0:
dim_node.header = header_node_0;
break;
case 1:
if (c2c.display == DISPLAY_SNP_PEER)
dim_node.header = header_node_1_peers_stores;
else
dim_node.header = header_node_1_hitms_stores;
break;
case 2:
dim_node.header = header_node_2;
break;
default:
break;
}
}
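/*
 * Build a per-node cpu bitmap and a cpu -> node lookup table from the
 * NUMA topology recorded in the session header.
 */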
static int setup_nodes(struct perf_session *session)
{
struct numa_node *n;
unsigned long **nodes;
int node, idx;
struct perf_cpu cpu;
int *cpu2node;
if (c2c.node_info > 2)
c2c.node_info = 2;
c2c.nodes_cnt = session->header.env.nr_numa_nodes;
c2c.cpus_cnt = session->header.env.nr_cpus_avail;
n = session->header.env.numa_nodes;
if (!n)
return -EINVAL;
nodes = zalloc(sizeof(unsigned long *) * c2c.nodes_cnt);
if (!nodes)
return -ENOMEM;
c2c.nodes = nodes;
cpu2node = zalloc(sizeof(int) * c2c.cpus_cnt);
if (!cpu2node)
return -ENOMEM;
for (idx = 0; idx < c2c.cpus_cnt; idx++)
cpu2node[idx] = -1;
c2c.cpu2node = cpu2node;
for (node = 0; node < c2c.nodes_cnt; node++) {
struct perf_cpu_map *map = n[node].map;
unsigned long *set;
set = bitmap_zalloc(c2c.cpus_cnt);
if (!set)
return -ENOMEM;
nodes[node] = set;
/* empty node, skip */
if (perf_cpu_map__empty(map))
continue;
perf_cpu_map__for_each_cpu(cpu, idx, map) {
__set_bit(cpu.cpu, set);
if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug"))
return -EINVAL;
cpu2node[cpu.cpu] = node;
}
}
setup_nodes_header();
return 0;
}
#define HAS_HITMS(__h) ((__h)->stats.lcl_hitm || (__h)->stats.rmt_hitm)
#define HAS_PEER(__h) ((__h)->stats.lcl_peer || (__h)->stats.rmt_peer)
static int resort_shared_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
c2c_he = container_of(he, struct c2c_hist_entry, he);
if (HAS_HITMS(c2c_he) || HAS_PEER(c2c_he)) {
c2c.shared_clines++;
c2c_add_stats(&c2c.shared_clines_stats, &c2c_he->stats);
}
return 0;
}
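/* Invoke 'cb' on every hist entry, stopping at the first non-zero return. */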
static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb)
{
struct rb_node *next = rb_first_cached(&hists->entries);
int ret = 0;
while (next) {
struct hist_entry *he;
he = rb_entry(next, struct hist_entry, rb_node);
ret = cb(he, NULL);
if (ret)
break;
next = rb_next(&he->rb_node);
}
return ret;
}
static void print_c2c__display_stats(FILE *out)
{
int llc_misses;
struct c2c_stats *stats = &c2c.hists.stats;
llc_misses = get_load_llc_misses(stats);
fprintf(out, "=================================================\n");
fprintf(out, " Trace Event Information \n");
fprintf(out, "=================================================\n");
fprintf(out, " Total records : %10d\n", stats->nr_entries);
fprintf(out, " Locked Load/Store Operations : %10d\n", stats->locks);
fprintf(out, " Load Operations : %10d\n", stats->load);
fprintf(out, " Loads - uncacheable : %10d\n", stats->ld_uncache);
fprintf(out, " Loads - IO : %10d\n", stats->ld_io);
fprintf(out, " Loads - Miss : %10d\n", stats->ld_miss);
fprintf(out, " Loads - no mapping : %10d\n", stats->ld_noadrs);
fprintf(out, " Load Fill Buffer Hit : %10d\n", stats->ld_fbhit);
fprintf(out, " Load L1D hit : %10d\n", stats->ld_l1hit);
fprintf(out, " Load L2D hit : %10d\n", stats->ld_l2hit);
fprintf(out, " Load LLC hit : %10d\n", stats->ld_llchit + stats->lcl_hitm);
fprintf(out, " Load Local HITM : %10d\n", stats->lcl_hitm);
fprintf(out, " Load Remote HITM : %10d\n", stats->rmt_hitm);
fprintf(out, " Load Remote HIT : %10d\n", stats->rmt_hit);
fprintf(out, " Load Local DRAM : %10d\n", stats->lcl_dram);
fprintf(out, " Load Remote DRAM : %10d\n", stats->rmt_dram);
fprintf(out, " Load MESI State Exclusive : %10d\n", stats->ld_excl);
fprintf(out, " Load MESI State Shared : %10d\n", stats->ld_shared);
fprintf(out, " Load LLC Misses : %10d\n", llc_misses);
fprintf(out, " Load access blocked by data : %10d\n", stats->blk_data);
fprintf(out, " Load access blocked by address : %10d\n", stats->blk_addr);
fprintf(out, " Load HIT Local Peer : %10d\n", stats->lcl_peer);
fprintf(out, " Load HIT Remote Peer : %10d\n", stats->rmt_peer);
fprintf(out, " LLC Misses to Local DRAM : %10.1f%%\n", ((double)stats->lcl_dram/(double)llc_misses) * 100.);
fprintf(out, " LLC Misses to Remote DRAM : %10.1f%%\n", ((double)stats->rmt_dram/(double)llc_misses) * 100.);
fprintf(out, " LLC Misses to Remote cache (HIT) : %10.1f%%\n", ((double)stats->rmt_hit /(double)llc_misses) * 100.);
fprintf(out, " LLC Misses to Remote cache (HITM) : %10.1f%%\n", ((double)stats->rmt_hitm/(double)llc_misses) * 100.);
fprintf(out, " Store Operations : %10d\n", stats->store);
fprintf(out, " Store - uncacheable : %10d\n", stats->st_uncache);
fprintf(out, " Store - no mapping : %10d\n", stats->st_noadrs);
fprintf(out, " Store L1D Hit : %10d\n", stats->st_l1hit);
fprintf(out, " Store L1D Miss : %10d\n", stats->st_l1miss);
fprintf(out, " Store No available memory level : %10d\n", stats->st_na);
fprintf(out, " No Page Map Rejects : %10d\n", stats->nomap);
fprintf(out, " Unable to parse data source : %10d\n", stats->noparse);
}
static void print_shared_cacheline_info(FILE *out)
{
struct c2c_stats *stats = &c2c.shared_clines_stats;
int hitm_cnt = stats->lcl_hitm + stats->rmt_hitm;
fprintf(out, "=================================================\n");
fprintf(out, " Global Shared Cache Line Event Information \n");
fprintf(out, "=================================================\n");
fprintf(out, " Total Shared Cache Lines : %10d\n", c2c.shared_clines);
fprintf(out, " Load HITs on shared lines : %10d\n", stats->load);
fprintf(out, " Fill Buffer Hits on shared lines : %10d\n", stats->ld_fbhit);
fprintf(out, " L1D hits on shared lines : %10d\n", stats->ld_l1hit);
fprintf(out, " L2D hits on shared lines : %10d\n", stats->ld_l2hit);
fprintf(out, " LLC hits on shared lines : %10d\n", stats->ld_llchit + stats->lcl_hitm);
fprintf(out, " Load hits on peer cache or nodes : %10d\n", stats->lcl_peer + stats->rmt_peer);
fprintf(out, " Locked Access on shared lines : %10d\n", stats->locks);
fprintf(out, " Blocked Access on shared lines : %10d\n", stats->blk_data + stats->blk_addr);
fprintf(out, " Store HITs on shared lines : %10d\n", stats->store);
fprintf(out, " Store L1D hits on shared lines : %10d\n", stats->st_l1hit);
fprintf(out, " Store No available memory level : %10d\n", stats->st_na);
fprintf(out, " Total Merged records : %10d\n", hitm_cnt + stats->store);
}
static void print_cacheline(struct c2c_hists *c2c_hists,
struct hist_entry *he_cl,
struct perf_hpp_list *hpp_list,
FILE *out)
{
char bf[1000];
struct perf_hpp hpp = {
.buf = bf,
		.size = sizeof(bf),
};
static bool once;
if (!once) {
hists__fprintf_headers(&c2c_hists->hists, out);
once = true;
} else {
fprintf(out, "\n");
}
fprintf(out, " ----------------------------------------------------------------------\n");
__hist_entry__snprintf(he_cl, &hpp, hpp_list);
fprintf(out, "%s\n", bf);
fprintf(out, " ----------------------------------------------------------------------\n");
hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, false);
}
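/*
 * Print the cacheline distribution pareto: for every unfiltered shared
 * cacheline, print its summary line followed by the per-offset table.
 */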
static void print_pareto(FILE *out)
{
struct perf_hpp_list hpp_list;
struct rb_node *nd;
int ret;
const char *cl_output;
if (c2c.display != DISPLAY_SNP_PEER)
cl_output = "cl_num,"
"cl_rmt_hitm,"
"cl_lcl_hitm,"
"cl_stores_l1hit,"
"cl_stores_l1miss,"
"cl_stores_na,"
"dcacheline";
else
cl_output = "cl_num,"
"cl_rmt_peer,"
"cl_lcl_peer,"
"cl_stores_l1hit,"
"cl_stores_l1miss,"
"cl_stores_na,"
"dcacheline";
perf_hpp_list__init(&hpp_list);
ret = hpp_list__parse(&hpp_list, cl_output, NULL);
if (WARN_ONCE(ret, "failed to setup sort entries\n"))
return;
nd = rb_first_cached(&c2c.hists.hists.entries);
for (; nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
struct c2c_hist_entry *c2c_he;
if (he->filtered)
continue;
c2c_he = container_of(he, struct c2c_hist_entry, he);
print_cacheline(c2c_he->hists, he, &hpp_list, out);
}
}
static void print_c2c_info(FILE *out, struct perf_session *session)
{
struct evlist *evlist = session->evlist;
struct evsel *evsel;
bool first = true;
fprintf(out, "=================================================\n");
fprintf(out, " c2c details \n");
fprintf(out, "=================================================\n");
evlist__for_each_entry(evlist, evsel) {
fprintf(out, "%-36s: %s\n", first ? " Events" : "", evsel__name(evsel));
first = false;
}
fprintf(out, " Cachelines sort on : %s\n",
display_str[c2c.display]);
fprintf(out, " Cacheline data grouping : %s\n", c2c.cl_sort);
}
static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
{
setup_pager();
print_c2c__display_stats(out);
fprintf(out, "\n");
print_shared_cacheline_info(out);
fprintf(out, "\n");
print_c2c_info(out, session);
if (c2c.stats_only)
return;
fprintf(out, "\n");
fprintf(out, "=================================================\n");
fprintf(out, " Shared Data Cache Line Table \n");
fprintf(out, "=================================================\n");
fprintf(out, "#\n");
hists__fprintf(&c2c.hists.hists, true, 0, 0, 0, stdout, true);
fprintf(out, "\n");
fprintf(out, "=================================================\n");
fprintf(out, " Shared Cache Line Distribution Pareto \n");
fprintf(out, "=================================================\n");
fprintf(out, "#\n");
print_pareto(out);
}
#ifdef HAVE_SLANG_SUPPORT
static void c2c_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
struct rb_node *nd = rb_first_cached(&hb->hists->entries);
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
if (!he->filtered)
nr_entries++;
nd = rb_next(nd);
}
hb->nr_non_filtered_entries = nr_entries;
}
struct c2c_cacheline_browser {
struct hist_browser hb;
struct hist_entry *he;
};
static int
perf_c2c_cacheline_browser__title(struct hist_browser *browser,
char *bf, size_t size)
{
struct c2c_cacheline_browser *cl_browser;
struct hist_entry *he;
uint64_t addr = 0;
cl_browser = container_of(browser, struct c2c_cacheline_browser, hb);
he = cl_browser->he;
if (he->mem_info)
addr = cl_address(he->mem_info->daddr.addr, chk_double_cl);
scnprintf(bf, size, "Cacheline 0x%lx", addr);
return 0;
}
static struct c2c_cacheline_browser*
c2c_cacheline_browser__new(struct hists *hists, struct hist_entry *he)
{
struct c2c_cacheline_browser *browser;
browser = zalloc(sizeof(*browser));
if (browser) {
hist_browser__init(&browser->hb, hists);
browser->hb.c2c_filter = true;
browser->hb.title = perf_c2c_cacheline_browser__title;
browser->he = he;
}
return browser;
}
static int perf_c2c__browse_cacheline(struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
struct c2c_hists *c2c_hists;
struct c2c_cacheline_browser *cl_browser;
struct hist_browser *browser;
int key = -1;
static const char help[] =
" ENTER Toggle callchains (if present) \n"
" n Toggle Node details info \n"
" s Toggle full length of symbol and source line columns \n"
" q Return back to cacheline list \n";
if (!he)
return 0;
/* Display compact version first. */
c2c.symbol_full = false;
c2c_he = container_of(he, struct c2c_hist_entry, he);
c2c_hists = c2c_he->hists;
cl_browser = c2c_cacheline_browser__new(&c2c_hists->hists, he);
if (cl_browser == NULL)
return -1;
browser = &cl_browser->hb;
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
c2c_browser__update_nr_entries(browser);
while (1) {
key = hist_browser__run(browser, "? - help", true, 0);
switch (key) {
case 's':
c2c.symbol_full = !c2c.symbol_full;
break;
case 'n':
c2c.node_info = (c2c.node_info + 1) % 3;
setup_nodes_header();
break;
case 'q':
goto out;
case '?':
ui_browser__help_window(&browser->b, help);
break;
default:
break;
}
}
out:
free(cl_browser);
return 0;
}
static int perf_c2c_browser__title(struct hist_browser *browser,
char *bf, size_t size)
{
scnprintf(bf, size,
"Shared Data Cache Line Table "
"(%lu entries, sorted on %s)",
browser->nr_non_filtered_entries,
display_str[c2c.display]);
return 0;
}
static struct hist_browser*
perf_c2c_browser__new(struct hists *hists)
{
struct hist_browser *browser = hist_browser__new(hists);
if (browser) {
browser->title = perf_c2c_browser__title;
browser->c2c_filter = true;
}
return browser;
}
static int perf_c2c__hists_browse(struct hists *hists)
{
struct hist_browser *browser;
int key = -1;
static const char help[] =
" d Display cacheline details \n"
" ENTER Toggle callchains (if present) \n"
" q Quit \n";
browser = perf_c2c_browser__new(hists);
if (browser == NULL)
return -1;
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
c2c_browser__update_nr_entries(browser);
while (1) {
key = hist_browser__run(browser, "? - help", true, 0);
switch (key) {
case 'q':
goto out;
case 'd':
perf_c2c__browse_cacheline(browser->he_selection);
break;
case '?':
ui_browser__help_window(&browser->b, help);
break;
default:
break;
}
}
out:
hist_browser__delete(browser);
return 0;
}
static void perf_c2c_display(struct perf_session *session)
{
if (use_browser == 0)
perf_c2c__hists_fprintf(stdout, session);
else
perf_c2c__hists_browse(&c2c.hists.hists);
}
#else
static void perf_c2c_display(struct perf_session *session)
{
use_browser = 0;
perf_c2c__hists_fprintf(stdout, session);
}
#endif /* HAVE_SLANG_SUPPORT */
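/*
 * Return a newly allocated string of 'len' characters with 'orig'
 * centered and padded with dashes, e.g. "---- Cacheline ----".
 */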
static char *fill_line(const char *orig, int len)
{
int i, j, olen = strlen(orig);
char *buf;
buf = zalloc(len + 1);
if (!buf)
return NULL;
j = len / 2 - olen / 2;
for (i = 0; i < j - 1; i++)
buf[i] = '-';
buf[i++] = ' ';
strcpy(buf + i, orig);
i += olen;
buf[i++] = ' ';
for (; i < len; i++)
buf[i] = '-';
return buf;
}
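/*
 * Adjust headers and widths that differ between the stdio and TUI
 * output modes and rebuild the line[0] header text of the dcacheline
 * and offset columns to match the final column widths.
 */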
static int ui_quirks(void)
{
const char *nodestr = "Data address";
char *buf;
if (!c2c.use_stdio) {
dim_offset.width = 5;
dim_offset.header = header_offset_tui;
nodestr = chk_double_cl ? "Double-CL" : "CL";
}
dim_percent_costly_snoop.header = percent_costly_snoop_header[c2c.display];
	/* Fix up the line[0] header text for the dcacheline column. */
buf = fill_line(chk_double_cl ? "Double-Cacheline" : "Cacheline",
dim_dcacheline.width +
dim_dcacheline_node.width +
dim_dcacheline_count.width + 4);
if (!buf)
return -ENOMEM;
dim_dcacheline.header.line[0].text = buf;
	/* Fix up the line[0] header text for the offset column. */
buf = fill_line(nodestr, dim_offset.width +
dim_offset_node.width +
dim_dcacheline_count.width + 4);
if (!buf)
return -ENOMEM;
dim_offset.header.line[0].text = buf;
return 0;
}
#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
static const char callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
CALLCHAIN_REPORT_HELP
"\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
struct callchain_param *callchain = opt->value;
callchain->enabled = !unset;
/*
* --no-call-graph
*/
if (unset) {
symbol_conf.use_callchain = false;
callchain->mode = CHAIN_NONE;
return 0;
}
return parse_callchain_report_opt(arg);
}
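/*
 * Derive the callchain record mode from the combined sample type:
 * DWARF takes precedence over LBR, which takes precedence over FP.
 */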
static int setup_callchain(struct evlist *evlist)
{
u64 sample_type = evlist__combined_sample_type(evlist);
enum perf_call_graph_mode mode = CALLCHAIN_NONE;
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
(sample_type & PERF_SAMPLE_STACK_USER)) {
mode = CALLCHAIN_DWARF;
dwarf_callchain_users = true;
} else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
mode = CALLCHAIN_LBR;
else if (sample_type & PERF_SAMPLE_CALLCHAIN)
mode = CALLCHAIN_FP;
if (!callchain_param.enabled &&
callchain_param.mode != CHAIN_NONE &&
mode != CALLCHAIN_NONE) {
symbol_conf.use_callchain = true;
if (callchain_register_param(&callchain_param) < 0) {
ui__error("Can't register callchain params.\n");
return -EINVAL;
}
}
if (c2c.stitch_lbr && (mode != CALLCHAIN_LBR)) {
ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
"Please apply --call-graph lbr when recording.\n");
c2c.stitch_lbr = false;
}
callchain_param.record_mode = mode;
callchain_param.min_percent = 0;
return 0;
}
static int setup_display(const char *str)
{
const char *display = str;
if (!strcmp(display, "tot"))
c2c.display = DISPLAY_TOT_HITM;
else if (!strcmp(display, "rmt"))
c2c.display = DISPLAY_RMT_HITM;
else if (!strcmp(display, "lcl"))
c2c.display = DISPLAY_LCL_HITM;
else if (!strcmp(display, "peer"))
c2c.display = DISPLAY_SNP_PEER;
else {
pr_err("failed: unknown display type: %s\n", str);
return -1;
}
return 0;
}
#define for_each_token(__tok, __buf, __sep, __tmp) \
for (__tok = strtok_r(__buf, __sep, &__tmp); __tok; \
__tok = strtok_r(NULL, __sep, &__tmp))
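/*
 * Build the cacheline output field list (c2c.cl_output) from the
 * coalesce sort tokens and the current display mode.
 */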
static int build_cl_output(char *cl_sort, bool no_source)
{
char *tok, *tmp, *buf = strdup(cl_sort);
bool add_pid = false;
bool add_tid = false;
bool add_iaddr = false;
bool add_sym = false;
bool add_dso = false;
bool add_src = false;
int ret = 0;
if (!buf)
return -ENOMEM;
for_each_token(tok, buf, ",", tmp) {
if (!strcmp(tok, "tid")) {
add_tid = true;
} else if (!strcmp(tok, "pid")) {
add_pid = true;
} else if (!strcmp(tok, "iaddr")) {
add_iaddr = true;
add_sym = true;
add_dso = true;
add_src = no_source ? false : true;
} else if (!strcmp(tok, "dso")) {
add_dso = true;
} else if (strcmp(tok, "offset")) {
pr_err("unrecognized sort token: %s\n", tok);
ret = -EINVAL;
goto err;
}
}
if (asprintf(&c2c.cl_output,
"%s%s%s%s%s%s%s%s%s%s%s%s",
c2c.use_stdio ? "cl_num_empty," : "",
c2c.display == DISPLAY_SNP_PEER ? "percent_rmt_peer,"
"percent_lcl_peer," :
"percent_rmt_hitm,"
"percent_lcl_hitm,",
"percent_stores_l1hit,"
"percent_stores_l1miss,"
"percent_stores_na,"
"offset,offset_node,dcacheline_count,",
add_pid ? "pid," : "",
add_tid ? "tid," : "",
add_iaddr ? "iaddr," : "",
c2c.display == DISPLAY_SNP_PEER ? "mean_rmt_peer,"
"mean_lcl_peer," :
"mean_rmt,"
"mean_lcl,",
"mean_load,"
"tot_recs,"
"cpucnt,",
add_sym ? "symbol," : "",
add_dso ? "dso," : "",
add_src ? "cl_srcline," : "",
"node") < 0) {
ret = -ENOMEM;
goto err;
}
c2c.show_src = add_src;
err:
free(buf);
return ret;
}
static int setup_coalesce(const char *coalesce, bool no_source)
{
const char *c = coalesce ?: coalesce_default;
const char *sort_str = NULL;
if (asprintf(&c2c.cl_sort, "offset,%s", c) < 0)
return -ENOMEM;
if (build_cl_output(c2c.cl_sort, no_source))
return -1;
if (c2c.display == DISPLAY_TOT_HITM)
sort_str = "tot_hitm";
else if (c2c.display == DISPLAY_RMT_HITM)
sort_str = "rmt_hitm,lcl_hitm";
else if (c2c.display == DISPLAY_LCL_HITM)
sort_str = "lcl_hitm,rmt_hitm";
else if (c2c.display == DISPLAY_SNP_PEER)
sort_str = "tot_peer";
if (asprintf(&c2c.cl_resort, "offset,%s", sort_str) < 0)
return -ENOMEM;
pr_debug("coalesce sort fields: %s\n", c2c.cl_sort);
pr_debug("coalesce resort fields: %s\n", c2c.cl_resort);
pr_debug("coalesce output fields: %s\n", c2c.cl_output);
return 0;
}
static int perf_c2c__report(int argc, const char **argv)
{
struct itrace_synth_opts itrace_synth_opts = {
.set = true,
.mem = true, /* Only enable memory event */
.default_no_sample = true,
};
struct perf_session *session;
struct ui_progress prog;
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
const char *display = NULL;
const char *coalesce = NULL;
bool no_source = false;
const struct option options[] = {
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING('i', "input", &input_name, "file",
"the input file to process"),
OPT_INCR('N', "node-info", &c2c.node_info,
"show extra node info in report (repeat for more info)"),
OPT_BOOLEAN(0, "stdio", &c2c.use_stdio, "Use the stdio interface"),
OPT_BOOLEAN(0, "stats", &c2c.stats_only,
"Display only statistic tables (implies --stdio)"),
OPT_BOOLEAN(0, "full-symbols", &c2c.symbol_full,
"Display full length of symbols"),
OPT_BOOLEAN(0, "no-source", &no_source,
"Do not display Source Line column"),
OPT_BOOLEAN(0, "show-all", &c2c.show_all,
"Show all captured HITM lines."),
OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
"print_type,threshold[,print_limit],order,sort_key[,branch],value",
callchain_help, &parse_callchain_opt,
callchain_default_opt),
OPT_STRING('d', "display", &display, "Switch HITM output type", "tot,lcl,rmt,peer"),
OPT_STRING('c', "coalesce", &coalesce, "coalesce fields",
"coalesce fields: pid,tid,iaddr,dso"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
OPT_BOOLEAN(0, "stitch-lbr", &c2c.stitch_lbr,
"Enable LBR callgraph stitching approach"),
OPT_BOOLEAN(0, "double-cl", &chk_double_cl, "Detect adjacent cacheline false sharing"),
OPT_PARENT(c2c_options),
OPT_END()
};
int err = 0;
const char *output_str, *sort_str = NULL;
argc = parse_options(argc, argv, options, report_c2c_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (argc)
usage_with_options(report_c2c_usage, options);
#ifndef HAVE_SLANG_SUPPORT
c2c.use_stdio = true;
#endif
if (c2c.stats_only)
c2c.use_stdio = true;
err = symbol__validate_sym_arguments();
if (err)
goto out;
if (!input_name || !strlen(input_name))
input_name = "perf.data";
data.path = input_name;
data.force = symbol_conf.force;
session = perf_session__new(&data, &c2c.tool);
if (IS_ERR(session)) {
err = PTR_ERR(session);
pr_debug("Error creating perf session\n");
goto out;
}
	/*
	 * Use 'tot' as the default display type if the user doesn't
	 * specify it; since the Arm64 platform doesn't support the HITM
	 * flags, use 'peer' as the default there.
	 */
if (!display) {
if (!strcmp(perf_env__arch(&session->header.env), "arm64"))
display = "peer";
else
display = "tot";
}
err = setup_display(display);
if (err)
goto out_session;
err = setup_coalesce(coalesce, no_source);
if (err) {
pr_debug("Failed to initialize hists\n");
goto out_session;
}
err = c2c_hists__init(&c2c.hists, "dcacheline", 2);
if (err) {
pr_debug("Failed to initialize hists\n");
goto out_session;
}
session->itrace_synth_opts = &itrace_synth_opts;
err = setup_nodes(session);
if (err) {
pr_err("Failed setup nodes\n");
goto out_session;
}
err = mem2node__init(&c2c.mem2node, &session->header.env);
if (err)
goto out_session;
err = setup_callchain(session->evlist);
if (err)
goto out_mem2node;
	err = symbol__init(&session->header.env);
	if (err < 0)
		goto out_mem2node;
	/* No pipe support at the moment. */
	if (perf_data__is_pipe(session->data)) {
		pr_debug("No pipe support at the moment.\n");
		err = -EINVAL;
		goto out_mem2node;
	}
if (c2c.use_stdio)
use_browser = 0;
else
use_browser = 1;
setup_browser(false);
err = perf_session__process_events(session);
if (err) {
pr_err("failed to process sample\n");
goto out_mem2node;
}
if (c2c.display != DISPLAY_SNP_PEER)
output_str = "cl_idx,"
"dcacheline,"
"dcacheline_node,"
"dcacheline_count,"
"percent_costly_snoop,"
"tot_hitm,lcl_hitm,rmt_hitm,"
"tot_recs,"
"tot_loads,"
"tot_stores,"
"stores_l1hit,stores_l1miss,stores_na,"
"ld_fbhit,ld_l1hit,ld_l2hit,"
"ld_lclhit,lcl_hitm,"
"ld_rmthit,rmt_hitm,"
"dram_lcl,dram_rmt";
else
output_str = "cl_idx,"
"dcacheline,"
"dcacheline_node,"
"dcacheline_count,"
"percent_costly_snoop,"
"tot_peer,lcl_peer,rmt_peer,"
"tot_recs,"
"tot_loads,"
"tot_stores,"
"stores_l1hit,stores_l1miss,stores_na,"
"ld_fbhit,ld_l1hit,ld_l2hit,"
"ld_lclhit,lcl_hitm,"
"ld_rmthit,rmt_hitm,"
"dram_lcl,dram_rmt";
if (c2c.display == DISPLAY_TOT_HITM)
sort_str = "tot_hitm";
else if (c2c.display == DISPLAY_RMT_HITM)
sort_str = "rmt_hitm";
else if (c2c.display == DISPLAY_LCL_HITM)
sort_str = "lcl_hitm";
else if (c2c.display == DISPLAY_SNP_PEER)
sort_str = "tot_peer";
c2c_hists__reinit(&c2c.hists, output_str, sort_str);
ui_progress__init(&prog, c2c.hists.hists.nr_entries, "Sorting...");
hists__collapse_resort(&c2c.hists.hists, NULL);
hists__output_resort_cb(&c2c.hists.hists, &prog, resort_shared_cl_cb);
hists__iterate_cb(&c2c.hists.hists, resort_cl_cb);
ui_progress__finish();
	err = ui_quirks();
	if (err) {
		pr_err("failed to setup UI\n");
		goto out_mem2node;
	}
perf_c2c_display(session);
out_mem2node:
mem2node__exit(&c2c.mem2node);
out_session:
perf_session__delete(session);
out:
return err;
}
static int parse_record_events(const struct option *opt,
const char *str, int unset __maybe_unused)
{
bool *event_set = (bool *) opt->value;
if (!strcmp(str, "list")) {
perf_mem_events__list();
exit(0);
}
if (perf_mem_events__parse(str))
exit(-1);
*event_set = true;
return 0;
}
static const char * const __usage_record[] = {
"perf c2c record [<options>] [<command>]",
"perf c2c record [<options>] -- <command> [<options>]",
NULL
};
static const char * const *record_mem_usage = __usage_record;
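/*
 * 'perf c2c record' is a wrapper around 'perf record' that adds the
 * memory event and sampling options needed for c2c analysis.
 */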
static int perf_c2c__record(int argc, const char **argv)
{
int rec_argc, i = 0, j, rec_tmp_nr = 0;
const char **rec_argv;
char **rec_tmp;
int ret;
bool all_user = false, all_kernel = false;
bool event_set = false;
struct perf_mem_event *e;
struct option options[] = {
OPT_CALLBACK('e', "event", &event_set, "event",
"event selector. Use 'perf c2c record -e list' to list available events",
parse_record_events),
OPT_BOOLEAN('u', "all-user", &all_user, "collect only user level data"),
OPT_BOOLEAN('k', "all-kernel", &all_kernel, "collect only kernel level data"),
OPT_UINTEGER('l', "ldlat", &perf_mem_events__loads_ldlat, "setup mem-loads latency"),
OPT_PARENT(c2c_options),
OPT_END()
};
if (perf_mem_events__init()) {
pr_err("failed: memory events not supported\n");
return -1;
}
argc = parse_options(argc, argv, options, record_mem_usage,
PARSE_OPT_KEEP_UNKNOWN);
/* Max number of arguments multiplied by number of PMUs that can support them. */
rec_argc = argc + 11 * perf_pmus__num_mem_pmus();
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
rec_tmp = calloc(rec_argc + 1, sizeof(char *));
if (!rec_tmp) {
free(rec_argv);
return -1;
}
rec_argv[i++] = "record";
if (!event_set) {
e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
		/*
		 * The load and store operations are required; use the combined
		 * PERF_MEM_EVENTS__LOAD_STORE event if it is supported.
		 */
if (e->tag) {
e->record = true;
rec_argv[i++] = "-W";
} else {
e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
e->record = true;
e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
e->record = true;
}
}
e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
if (e->record)
rec_argv[i++] = "-W";
rec_argv[i++] = "-d";
rec_argv[i++] = "--phys-data";
rec_argv[i++] = "--sample-cpu";
ret = perf_mem_events__record_args(rec_argv, &i, rec_tmp, &rec_tmp_nr);
if (ret)
goto out;
if (all_user)
rec_argv[i++] = "--all-user";
if (all_kernel)
rec_argv[i++] = "--all-kernel";
for (j = 0; j < argc; j++, i++)
rec_argv[i] = argv[j];
if (verbose > 0) {
pr_debug("calling: ");
j = 0;
while (rec_argv[j]) {
pr_debug("%s ", rec_argv[j]);
j++;
}
pr_debug("\n");
}
ret = cmd_record(i, rec_argv);
out:
for (i = 0; i < rec_tmp_nr; i++)
free(rec_tmp[i]);
free(rec_tmp);
free(rec_argv);
return ret;
}
int cmd_c2c(int argc, const char **argv)
{
argc = parse_options(argc, argv, c2c_options, c2c_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(c2c_usage, c2c_options);
if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
return perf_c2c__record(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
return perf_c2c__report(argc, argv);
} else {
usage_with_options(c2c_usage, c2c_options);
}
return 0;
}
| linux-master | tools/perf/builtin-c2c.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <string.h>
#define VDSO__MAP_NAME "[vdso]"
/*
* Include definition of find_map() also used in util/vdso.c for
* building perf.
*/
#include "util/find-map.c"
int main(void)
{
void *start, *end;
size_t size, written;
if (find_map(&start, &end, VDSO__MAP_NAME))
return 1;
size = end - start;
while (size) {
written = fwrite(start, 1, size, stdout);
if (!written)
return 1;
start += written;
size -= written;
}
if (fflush(stdout))
return 1;
return 0;
}
| linux-master | tools/perf/perf-read-vdso.c |
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "color.h"
#include "util/debug.h"
#include "util/header.h"
#include <tools/config.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <subcmd/parse-options.h>
struct version {
bool build_options;
};
static struct version version;
static struct option version_options[] = {
OPT_BOOLEAN(0, "build-options", &version.build_options,
"display the build options"),
OPT_END(),
};
static const char * const version_usage[] = {
"perf version [<options>]",
NULL
};
static void on_off_print(const char *status)
{
printf("[ ");
if (!strcmp(status, "OFF"))
color_fprintf(stdout, PERF_COLOR_RED, "%-3s", status);
else
color_fprintf(stdout, PERF_COLOR_GREEN, "%-3s", status);
printf(" ]");
}
static void status_print(const char *name, const char *macro,
const char *status)
{
printf("%22s: ", name);
on_off_print(status);
printf(" # %s\n", macro);
}
#define STATUS(__d, __m) \
do { \
if (IS_BUILTIN(__d)) \
status_print(#__m, #__d, "on"); \
else \
status_print(#__m, #__d, "OFF"); \
} while (0)
static void library_status(void)
{
STATUS(HAVE_DWARF_SUPPORT, dwarf);
STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations);
#ifndef HAVE_SYSCALL_TABLE_SUPPORT
STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit);
#endif
STATUS(HAVE_SYSCALL_TABLE_SUPPORT, syscall_table);
STATUS(HAVE_LIBBFD_SUPPORT, libbfd);
STATUS(HAVE_DEBUGINFOD_SUPPORT, debuginfod);
STATUS(HAVE_LIBELF_SUPPORT, libelf);
STATUS(HAVE_LIBNUMA_SUPPORT, libnuma);
STATUS(HAVE_LIBNUMA_SUPPORT, numa_num_possible_cpus);
STATUS(HAVE_LIBPERL_SUPPORT, libperl);
STATUS(HAVE_LIBPYTHON_SUPPORT, libpython);
STATUS(HAVE_SLANG_SUPPORT, libslang);
STATUS(HAVE_LIBCRYPTO_SUPPORT, libcrypto);
STATUS(HAVE_LIBUNWIND_SUPPORT, libunwind);
STATUS(HAVE_DWARF_SUPPORT, libdw-dwarf-unwind);
STATUS(HAVE_ZLIB_SUPPORT, zlib);
STATUS(HAVE_LZMA_SUPPORT, lzma);
STATUS(HAVE_AUXTRACE_SUPPORT, get_cpuid);
STATUS(HAVE_LIBBPF_SUPPORT, bpf);
STATUS(HAVE_AIO_SUPPORT, aio);
STATUS(HAVE_ZSTD_SUPPORT, zstd);
STATUS(HAVE_LIBPFM, libpfm4);
STATUS(HAVE_LIBTRACEEVENT, libtraceevent);
}
int cmd_version(int argc, const char **argv)
{
argc = parse_options(argc, argv, version_options, version_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
printf("perf version %s\n", perf_version_string);
if (version.build_options || verbose > 0)
library_status();
return 0;
}
| linux-master | tools/perf/builtin-version.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-ftrace.c
*
* Copyright (c) 2013 LG Electronics, Namhyung Kim <[email protected]>
* Copyright (c) 2020 Changbin Du <[email protected]>, significant enhancement.
*/
#include "builtin.h"
#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
#include <math.h>
#include <poll.h>
#include <ctype.h>
#include <linux/capability.h>
#include <linux/string.h>
#include "debug.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "strfilter.h"
#include "util/cap.h"
#include "util/config.h"
#include "util/ftrace.h"
#include "util/units.h"
#include "util/parse-sublevel-options.h"
#define DEFAULT_TRACER "function_graph"
static volatile sig_atomic_t workload_exec_errno;
static volatile sig_atomic_t done;
static void sig_handler(int sig __maybe_unused)
{
done = true;
}
/*
 * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 * we asked for that by setting its exec_error hook to the function below,
 * ftrace__workload_exec_failed_signal.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
siginfo_t *info __maybe_unused,
void *ucontext __maybe_unused)
{
workload_exec_errno = info->si_value.sival_int;
done = true;
}
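/*
 * Write 'val' to the given tracefs file, truncating the file or
 * appending to it depending on 'append'.
 */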
static int __write_tracing_file(const char *name, const char *val, bool append)
{
char *file;
int fd, ret = -1;
ssize_t size = strlen(val);
int flags = O_WRONLY;
char errbuf[512];
char *val_copy;
file = get_tracing_file(name);
if (!file) {
pr_debug("cannot get tracing file: %s\n", name);
return -1;
}
if (append)
flags |= O_APPEND;
else
flags |= O_TRUNC;
fd = open(file, flags);
if (fd < 0) {
pr_debug("cannot open tracing file: %s: %s\n",
name, str_error_r(errno, errbuf, sizeof(errbuf)));
goto out;
}
/*
* Copy the original value and append a '\n'. Without this,
* the kernel can hide possible errors.
*/
val_copy = strdup(val);
if (!val_copy)
goto out_close;
val_copy[size] = '\n';
if (write(fd, val_copy, size + 1) == size + 1)
ret = 0;
else
pr_debug("write '%s' to tracing/%s failed: %s\n",
val, name, str_error_r(errno, errbuf, sizeof(errbuf)));
free(val_copy);
out_close:
close(fd);
out:
put_tracing_file(file);
return ret;
}
static int write_tracing_file(const char *name, const char *val)
{
return __write_tracing_file(name, val, false);
}
static int append_tracing_file(const char *name, const char *val)
{
return __write_tracing_file(name, val, true);
}
static int read_tracing_file_to_stdout(const char *name)
{
char buf[4096];
char *file;
int fd;
int ret = -1;
file = get_tracing_file(name);
if (!file) {
pr_debug("cannot get tracing file: %s\n", name);
return -1;
}
fd = open(file, O_RDONLY);
if (fd < 0) {
pr_debug("cannot open tracing file: %s: %s\n",
name, str_error_r(errno, buf, sizeof(buf)));
goto out;
}
/* read contents to stdout */
while (true) {
int n = read(fd, buf, sizeof(buf));
if (n == 0)
break;
else if (n < 0)
goto out_close;
if (fwrite(buf, n, 1, stdout) != 1)
goto out_close;
}
ret = 0;
out_close:
close(fd);
out:
put_tracing_file(file);
return ret;
}
static int read_tracing_file_by_line(const char *name,
void (*cb)(char *str, void *arg),
void *cb_arg)
{
char *line = NULL;
size_t len = 0;
char *file;
FILE *fp;
file = get_tracing_file(name);
if (!file) {
pr_debug("cannot get tracing file: %s\n", name);
return -1;
}
fp = fopen(file, "r");
if (fp == NULL) {
pr_debug("cannot open tracing file: %s\n", name);
put_tracing_file(file);
return -1;
}
while (getline(&line, &len, fp) != -1) {
cb(line, cb_arg);
}
if (line)
free(line);
fclose(fp);
put_tracing_file(file);
return 0;
}
static int write_tracing_file_int(const char *name, int value)
{
char buf[16];
snprintf(buf, sizeof(buf), "%d", value);
if (write_tracing_file(name, buf) < 0)
return -1;
return 0;
}
static int write_tracing_option_file(const char *name, const char *val)
{
char *file;
int ret;
if (asprintf(&file, "options/%s", name) < 0)
return -1;
ret = __write_tracing_file(file, val, false);
free(file);
return ret;
}
static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);
static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
{
write_tracing_option_file("function-fork", "0");
write_tracing_option_file("func_stack_trace", "0");
write_tracing_option_file("sleep-time", "1");
write_tracing_option_file("funcgraph-irqs", "1");
write_tracing_option_file("funcgraph-proc", "0");
write_tracing_option_file("funcgraph-abstime", "0");
write_tracing_option_file("latency-format", "0");
write_tracing_option_file("irq-info", "0");
}
static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
if (write_tracing_file("tracing_on", "0") < 0)
return -1;
if (write_tracing_file("current_tracer", "nop") < 0)
return -1;
if (write_tracing_file("set_ftrace_pid", " ") < 0)
return -1;
if (reset_tracing_cpu() < 0)
return -1;
if (write_tracing_file("max_graph_depth", "0") < 0)
return -1;
if (write_tracing_file("tracing_thresh", "0") < 0)
return -1;
reset_tracing_filters();
reset_tracing_options(ftrace);
return 0;
}
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
int i;
char buf[16];
if (target__has_cpu(&ftrace->target))
return 0;
for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
scnprintf(buf, sizeof(buf), "%d",
perf_thread_map__pid(ftrace->evlist->core.threads, i));
if (append_tracing_file("set_ftrace_pid", buf) < 0)
return -1;
}
return 0;
}
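/* Write the cpu map as a hex mask string to the tracing_cpumask file. */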
static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
{
char *cpumask;
size_t mask_size;
int ret;
int last_cpu;
last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu;
mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
	mask_size += last_cpu / 32; /* ',' is needed after every 32nd CPU */
cpumask = malloc(mask_size);
if (cpumask == NULL) {
pr_debug("failed to allocate cpu mask\n");
return -1;
}
cpu_map__snprint_mask(cpumap, cpumask, mask_size);
ret = write_tracing_file("tracing_cpumask", cpumask);
free(cpumask);
return ret;
}
static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus;
if (!target__has_cpu(&ftrace->target))
return 0;
return set_tracing_cpumask(cpumap);
}
static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
{
if (!ftrace->func_stack_trace)
return 0;
if (write_tracing_option_file("func_stack_trace", "1") < 0)
return -1;
return 0;
}
static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
{
if (!ftrace->func_irq_info)
return 0;
if (write_tracing_option_file("irq-info", "1") < 0)
return -1;
return 0;
}
static int reset_tracing_cpu(void)
{
struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
int ret;
ret = set_tracing_cpumask(cpumap);
perf_cpu_map__put(cpumap);
return ret;
}
static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
struct filter_entry *pos;
list_for_each_entry(pos, funcs, list) {
if (append_tracing_file(filter_file, pos->name) < 0)
return -1;
}
return 0;
}
static int set_tracing_filters(struct perf_ftrace *ftrace)
{
int ret;
ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
if (ret < 0)
return ret;
ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
if (ret < 0)
return ret;
ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
if (ret < 0)
return ret;
/* old kernels do not have this filter */
__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);
return ret;
}
static void reset_tracing_filters(void)
{
write_tracing_file("set_ftrace_filter", " ");
write_tracing_file("set_ftrace_notrace", " ");
write_tracing_file("set_graph_function", " ");
write_tracing_file("set_graph_notrace", " ");
}
static int set_tracing_depth(struct perf_ftrace *ftrace)
{
if (ftrace->graph_depth == 0)
return 0;
if (ftrace->graph_depth < 0) {
pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
return -1;
}
if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
return -1;
return 0;
}
static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
{
int ret;
if (ftrace->percpu_buffer_size == 0)
return 0;
ret = write_tracing_file_int("buffer_size_kb",
ftrace->percpu_buffer_size / 1024);
if (ret < 0)
return ret;
return 0;
}
static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
{
if (!ftrace->inherit)
return 0;
if (write_tracing_option_file("function-fork", "1") < 0)
return -1;
return 0;
}
static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
{
if (!ftrace->graph_nosleep_time)
return 0;
if (write_tracing_option_file("sleep-time", "0") < 0)
return -1;
return 0;
}
static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
{
if (!ftrace->graph_noirqs)
return 0;
if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
return -1;
return 0;
}
static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
{
if (!ftrace->graph_verbose)
return 0;
if (write_tracing_option_file("funcgraph-proc", "1") < 0)
return -1;
if (write_tracing_option_file("funcgraph-abstime", "1") < 0)
return -1;
if (write_tracing_option_file("latency-format", "1") < 0)
return -1;
return 0;
}
static int set_tracing_thresh(struct perf_ftrace *ftrace)
{
int ret;
if (ftrace->graph_thresh == 0)
return 0;
ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
if (ret < 0)
return ret;
return 0;
}
static int set_tracing_options(struct perf_ftrace *ftrace)
{
if (set_tracing_pid(ftrace) < 0) {
pr_err("failed to set ftrace pid\n");
return -1;
}
if (set_tracing_cpu(ftrace) < 0) {
pr_err("failed to set tracing cpumask\n");
return -1;
}
if (set_tracing_func_stack_trace(ftrace) < 0) {
pr_err("failed to set tracing option func_stack_trace\n");
return -1;
}
if (set_tracing_func_irqinfo(ftrace) < 0) {
pr_err("failed to set tracing option irq-info\n");
return -1;
}
if (set_tracing_filters(ftrace) < 0) {
pr_err("failed to set tracing filters\n");
return -1;
}
if (set_tracing_depth(ftrace) < 0) {
pr_err("failed to set graph depth\n");
return -1;
}
if (set_tracing_percpu_buffer_size(ftrace) < 0) {
pr_err("failed to set tracing per-cpu buffer size\n");
return -1;
}
if (set_tracing_trace_inherit(ftrace) < 0) {
pr_err("failed to set tracing option function-fork\n");
return -1;
}
if (set_tracing_sleep_time(ftrace) < 0) {
pr_err("failed to set tracing option sleep-time\n");
return -1;
}
if (set_tracing_funcgraph_irqs(ftrace) < 0) {
pr_err("failed to set tracing option funcgraph-irqs\n");
return -1;
}
if (set_tracing_funcgraph_verbose(ftrace) < 0) {
pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n");
return -1;
}
if (set_tracing_thresh(ftrace) < 0) {
pr_err("failed to set tracing thresh\n");
return -1;
}
return 0;
}
static void select_tracer(struct perf_ftrace *ftrace)
{
bool graph = !list_empty(&ftrace->graph_funcs) ||
!list_empty(&ftrace->nograph_funcs);
bool func = !list_empty(&ftrace->filters) ||
!list_empty(&ftrace->notrace);
	/* The function_graph tracer has priority over the function tracer. */
if (graph)
ftrace->tracer = "function_graph";
else if (func)
ftrace->tracer = "function";
/* Otherwise, the default tracer is used. */
pr_debug("%s tracer is used\n", ftrace->tracer);
}
static int __cmd_ftrace(struct perf_ftrace *ftrace)
{
char *trace_file;
int trace_fd;
char buf[4096];
struct pollfd pollfd = {
.events = POLLIN,
};
if (!(perf_cap__capable(CAP_PERFMON) ||
perf_cap__capable(CAP_SYS_ADMIN))) {
pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
"root"
#endif
);
return -1;
}
select_tracer(ftrace);
if (reset_tracing_files(ftrace) < 0) {
pr_err("failed to reset ftrace\n");
goto out;
}
/* reset ftrace buffer */
if (write_tracing_file("trace", "0") < 0)
goto out;
if (set_tracing_options(ftrace) < 0)
goto out_reset;
if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
goto out_reset;
}
setup_pager();
trace_file = get_tracing_file("trace_pipe");
if (!trace_file) {
pr_err("failed to open trace_pipe\n");
goto out_reset;
}
trace_fd = open(trace_file, O_RDONLY);
put_tracing_file(trace_file);
if (trace_fd < 0) {
pr_err("failed to open trace_pipe\n");
goto out_reset;
}
fcntl(trace_fd, F_SETFL, O_NONBLOCK);
pollfd.fd = trace_fd;
/* display column headers */
read_tracing_file_to_stdout("trace");
if (!ftrace->target.initial_delay) {
if (write_tracing_file("tracing_on", "1") < 0) {
pr_err("can't enable tracing\n");
goto out_close_fd;
}
}
evlist__start_workload(ftrace->evlist);
if (ftrace->target.initial_delay > 0) {
usleep(ftrace->target.initial_delay * 1000);
if (write_tracing_file("tracing_on", "1") < 0) {
pr_err("can't enable tracing\n");
goto out_close_fd;
}
}
while (!done) {
if (poll(&pollfd, 1, -1) < 0)
break;
if (pollfd.revents & POLLIN) {
int n = read(trace_fd, buf, sizeof(buf));
if (n < 0)
break;
if (fwrite(buf, n, 1, stdout) != 1)
break;
/* flush output since stdout is in full buffering mode due to pager */
fflush(stdout);
}
}
write_tracing_file("tracing_on", "0");
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so the error message below appears at the end. */
fflush(stdout);
pr_err("workload failed: %s\n", emsg);
goto out_close_fd;
}
/* read remaining buffer contents */
while (true) {
int n = read(trace_fd, buf, sizeof(buf));
if (n <= 0)
break;
if (fwrite(buf, n, 1, stdout) != 1)
break;
}
out_close_fd:
close(trace_fd);
out_reset:
reset_tracing_files(ftrace);
out:
return (done && !workload_exec_errno) ? 0 : -1;
}
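/*
 * Parse function_graph output into log2 latency buckets.  Worked example
 * (values illustrative): a line " 1)   4.889 us  |  do_filp_open();"
 * yields num = 4.889, log2(4.889) ~ 2.3 => bucket 2; with use_nsec the
 * value is scaled to 4889 ns, log2(4889) ~ 12.3 => bucket 12.  Indices
 * are clamped to [0, NUM_BUCKET - 1].
 */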
static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf,
bool use_nsec)
{
char *p, *q;
char *unit;
double num;
int i;
/* ensure NUL termination */
buf[len] = '\0';
/* handle data line by line */
for (p = buf; (q = strchr(p, '\n')) != NULL; p = q + 1) {
*q = '\0';
/* move it to the line buffer */
strcat(linebuf, p);
/*
* parse trace output to get function duration like in
*
* # tracer: function_graph
* #
* # CPU DURATION FUNCTION CALLS
* # | | | | | | |
* 1) + 10.291 us | do_filp_open();
* 1) 4.889 us | do_filp_open();
* 1) 6.086 us | do_filp_open();
*
*/
if (linebuf[0] == '#')
goto next;
/* ignore CPU */
p = strchr(linebuf, ')');
if (p == NULL)
p = linebuf;
while (*p && !isdigit(*p) && (*p != '|'))
p++;
/* no duration */
if (*p == '\0' || *p == '|')
goto next;
num = strtod(p, &unit);
if (!unit || strncmp(unit, " us", 3))
goto next;
if (use_nsec)
num *= 1000;
i = log2(num);
if (i < 0)
i = 0;
if (i >= NUM_BUCKET)
i = NUM_BUCKET - 1;
buckets[i]++;
next:
/* empty the line buffer for the next output */
linebuf[0] = '\0';
}
/* preserve any remaining output (before newline) */
strcat(linebuf, p);
}
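/*
 * Render the histogram as an ASCII bar chart, roughly (output sketch,
 * not verbatim):
 *
 *   #       DURATION    |      COUNT | GRAPH                         |
 *      0 - 1    us      |          5 | ####                          |
 *      1 - 2    us      |         12 | ##########                    |
 *      ...
 *
 * Bucket boundaries are powers of two; units are promoted (us -> ms, or
 * ns -> us with --use-nsec) once the range reaches 1024.
 */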
static void display_histogram(int buckets[], bool use_nsec)
{
int i;
int total = 0;
int bar_total = 46; /* to fit in 80 column */
char bar[] = "###############################################";
int bar_len;
for (i = 0; i < NUM_BUCKET; i++)
total += buckets[i];
if (total == 0) {
printf("No data found\n");
return;
}
printf("# %14s | %10s | %-*s |\n",
" DURATION ", "COUNT", bar_total, "GRAPH");
bar_len = buckets[0] * bar_total / total;
printf(" %4d - %-4d %s | %10d | %.*s%*s |\n",
0, 1, "us", buckets[0], bar_len, bar, bar_total - bar_len, "");
for (i = 1; i < NUM_BUCKET - 1; i++) {
int start = (1 << (i - 1));
int stop = 1 << i;
const char *unit = use_nsec ? "ns" : "us";
if (start >= 1024) {
start >>= 10;
stop >>= 10;
unit = use_nsec ? "us" : "ms";
}
bar_len = buckets[i] * bar_total / total;
printf(" %4d - %-4d %s | %10d | %.*s%*s |\n",
start, stop, unit, buckets[i], bar_len, bar,
bar_total - bar_len, "");
}
bar_len = buckets[NUM_BUCKET - 1] * bar_total / total;
printf(" %4d - %-4s %s | %10d | %.*s%*s |\n",
1, "...", use_nsec ? "ms" : " s", buckets[NUM_BUCKET - 1],
bar_len, bar, bar_total - bar_len, "");
}
static int prepare_func_latency(struct perf_ftrace *ftrace)
{
char *trace_file;
int fd;
if (ftrace->target.use_bpf)
return perf_ftrace__latency_prepare_bpf(ftrace);
if (reset_tracing_files(ftrace) < 0) {
pr_err("failed to reset ftrace\n");
return -1;
}
/* reset ftrace buffer */
if (write_tracing_file("trace", "0") < 0)
return -1;
if (set_tracing_options(ftrace) < 0)
return -1;
/* force to use the function_graph tracer to track duration */
if (write_tracing_file("current_tracer", "function_graph") < 0) {
pr_err("failed to set current_tracer to function_graph\n");
return -1;
}
trace_file = get_tracing_file("trace_pipe");
if (!trace_file) {
pr_err("failed to open trace_pipe\n");
return -1;
}
fd = open(trace_file, O_RDONLY);
if (fd < 0)
pr_err("failed to open trace_pipe\n");
put_tracing_file(trace_file);
return fd;
}
static int start_func_latency(struct perf_ftrace *ftrace)
{
if (ftrace->target.use_bpf)
return perf_ftrace__latency_start_bpf(ftrace);
if (write_tracing_file("tracing_on", "1") < 0) {
pr_err("can't enable tracing\n");
return -1;
}
return 0;
}
static int stop_func_latency(struct perf_ftrace *ftrace)
{
if (ftrace->target.use_bpf)
return perf_ftrace__latency_stop_bpf(ftrace);
write_tracing_file("tracing_on", "0");
return 0;
}
static int read_func_latency(struct perf_ftrace *ftrace, int buckets[])
{
if (ftrace->target.use_bpf)
return perf_ftrace__latency_read_bpf(ftrace, buckets);
return 0;
}
static int cleanup_func_latency(struct perf_ftrace *ftrace)
{
if (ftrace->target.use_bpf)
return perf_ftrace__latency_cleanup_bpf(ftrace);
reset_tracing_files(ftrace);
return 0;
}
static int __cmd_latency(struct perf_ftrace *ftrace)
{
int trace_fd;
char buf[4096];
char line[256];
struct pollfd pollfd = {
.events = POLLIN,
};
int buckets[NUM_BUCKET] = { };
if (!(perf_cap__capable(CAP_PERFMON) ||
perf_cap__capable(CAP_SYS_ADMIN))) {
pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
"root"
#endif
);
return -1;
}
trace_fd = prepare_func_latency(ftrace);
if (trace_fd < 0)
goto out;
fcntl(trace_fd, F_SETFL, O_NONBLOCK);
pollfd.fd = trace_fd;
if (start_func_latency(ftrace) < 0)
goto out;
evlist__start_workload(ftrace->evlist);
line[0] = '\0';
while (!done) {
if (poll(&pollfd, 1, -1) < 0)
break;
if (pollfd.revents & POLLIN) {
int n = read(trace_fd, buf, sizeof(buf) - 1);
if (n < 0)
break;
make_histogram(buckets, buf, n, line, ftrace->use_nsec);
}
}
stop_func_latency(ftrace);
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
pr_err("workload failed: %s\n", emsg);
goto out;
}
/* read remaining buffer contents */
while (!ftrace->target.use_bpf) {
int n = read(trace_fd, buf, sizeof(buf) - 1);
if (n <= 0)
break;
make_histogram(buckets, buf, n, line, ftrace->use_nsec);
}
read_func_latency(ftrace, buckets);
display_histogram(buckets, ftrace->use_nsec);
out:
close(trace_fd);
cleanup_func_latency(ftrace);
return (done && !workload_exec_errno) ? 0 : -1;
}
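/*
 * perf_config() callback.  Only "ftrace.tracer" is recognized, e.g. in
 * ~/.perfconfig (illustrative):
 *
 *   [ftrace]
 *           tracer = function
 */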
static int perf_ftrace_config(const char *var, const char *value, void *cb)
{
struct perf_ftrace *ftrace = cb;
if (!strstarts(var, "ftrace."))
return 0;
if (strcmp(var, "ftrace.tracer"))
return -1;
if (!strcmp(value, "function_graph") ||
!strcmp(value, "function")) {
ftrace->tracer = value;
return 0;
}
pr_err("Please select \"function_graph\" (default) or \"function\"\n");
return -1;
}
static void list_function_cb(char *str, void *arg)
{
struct strfilter *filter = (struct strfilter *)arg;
if (strfilter__compare(filter, str))
printf("%s", str);
}
static int opt_list_avail_functions(const struct option *opt __maybe_unused,
const char *str, int unset)
{
struct strfilter *filter;
const char *err = NULL;
int ret;
if (unset || !str)
return -1;
filter = strfilter__new(str, &err);
if (!filter)
return err ? -EINVAL : -ENOMEM;
ret = strfilter__or(filter, str, &err);
if (ret == -EINVAL) {
pr_err("Filter parse error at %td.\n", err - str + 1);
pr_err("Source: \"%s\"\n", str);
pr_err(" %*c\n", (int)(err - str + 1), '^');
strfilter__delete(filter);
return ret;
}
ret = read_tracing_file_by_line("available_filter_functions",
list_function_cb, filter);
strfilter__delete(filter);
if (ret < 0)
return ret;
exit(0);
}
static int parse_filter_func(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct list_head *head = opt->value;
struct filter_entry *entry;
entry = malloc(sizeof(*entry) + strlen(str) + 1);
if (entry == NULL)
return -ENOMEM;
strcpy(entry->name, str);
list_add_tail(&entry->list, head);
return 0;
}
static void delete_filter_func(struct list_head *head)
{
struct filter_entry *pos, *tmp;
list_for_each_entry_safe(pos, tmp, head, list) {
list_del_init(&pos->list);
free(pos);
}
}
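/*
 * Parse the -m/--buffer-size argument, e.g. "-m 8K" => 8192 bytes
 * (illustrative).  Anything below 1KB is rejected since the value is
 * later written to buffer_size_kb in units of kilobytes.
 */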
static int parse_buffer_size(const struct option *opt,
const char *str, int unset)
{
unsigned long *s = (unsigned long *)opt->value;
static struct parse_tag tags_size[] = {
{ .tag = 'B', .mult = 1 },
{ .tag = 'K', .mult = 1 << 10 },
{ .tag = 'M', .mult = 1 << 20 },
{ .tag = 'G', .mult = 1 << 30 },
{ .tag = 0 },
};
unsigned long val;
if (unset) {
*s = 0;
return 0;
}
val = parse_tag_value(str, tags_size);
if (val != (unsigned long) -1) {
if (val < 1024) {
pr_err("buffer size too small, must larger than 1KB.");
return -1;
}
*s = val;
return 0;
}
return -1;
}
static int parse_func_tracer_opts(const struct option *opt,
const char *str, int unset)
{
int ret;
struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
struct sublevel_option func_tracer_opts[] = {
{ .name = "call-graph", .value_ptr = &ftrace->func_stack_trace },
{ .name = "irq-info", .value_ptr = &ftrace->func_irq_info },
{ .name = NULL, }
};
if (unset)
return 0;
ret = perf_parse_sublevel_options(str, func_tracer_opts);
if (ret)
return ret;
return 0;
}
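/*
 * Parse --graph-opts sub-options, e.g. (illustrative):
 *
 *   perf ftrace -G 'vfs_*' --graph-opts nosleep-time,noirqs,depth=3
 *
 * Each recognized name stores into the matching perf_ftrace field and is
 * applied later by the corresponding set_tracing_*() helper.
 */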
static int parse_graph_tracer_opts(const struct option *opt,
const char *str, int unset)
{
int ret;
struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
struct sublevel_option graph_tracer_opts[] = {
{ .name = "nosleep-time", .value_ptr = &ftrace->graph_nosleep_time },
{ .name = "noirqs", .value_ptr = &ftrace->graph_noirqs },
{ .name = "verbose", .value_ptr = &ftrace->graph_verbose },
{ .name = "thresh", .value_ptr = &ftrace->graph_thresh },
{ .name = "depth", .value_ptr = &ftrace->graph_depth },
{ .name = NULL, }
};
if (unset)
return 0;
ret = perf_parse_sublevel_options(str, graph_tracer_opts);
if (ret)
return ret;
return 0;
}
enum perf_ftrace_subcommand {
PERF_FTRACE_NONE,
PERF_FTRACE_TRACE,
PERF_FTRACE_LATENCY,
};
int cmd_ftrace(int argc, const char **argv)
{
int ret;
int (*cmd_func)(struct perf_ftrace *) = NULL;
struct perf_ftrace ftrace = {
.tracer = DEFAULT_TRACER,
.target = { .uid = UINT_MAX, },
};
const struct option common_options[] = {
OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
"Trace on existing process id"),
/* TODO: Add short option -t after -t/--tracer can be removed. */
OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
"Trace on existing thread id (exclusive to --pid)"),
OPT_INCR('v', "verbose", &verbose,
"Be more verbose"),
OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
"System-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
"List of cpus to monitor"),
OPT_END()
};
const struct option ftrace_options[] = {
OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
"Tracer to use: function_graph(default) or function"),
OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
"Show available functions to filter",
opt_list_avail_functions, "*"),
OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
"Trace given functions using function tracer",
parse_filter_func),
OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
"Do not trace given functions", parse_filter_func),
OPT_CALLBACK(0, "func-opts", &ftrace, "options",
"Function tracer options, available options: call-graph,irq-info",
parse_func_tracer_opts),
OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
"Trace given functions using function_graph tracer",
parse_filter_func),
OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
"Set nograph filter on given functions", parse_filter_func),
OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
"Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
parse_graph_tracer_opts),
OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
"Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
"Trace children processes"),
OPT_INTEGER('D', "delay", &ftrace.target.initial_delay,
"Number of milliseconds to wait before starting tracing after program start"),
OPT_PARENT(common_options),
};
const struct option latency_options[] = {
OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
"Show latency of given function", parse_filter_func),
#ifdef HAVE_BPF_SKEL
OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf,
"Use BPF to measure function latency"),
#endif
OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec,
"Use nano-second histogram"),
OPT_PARENT(common_options),
};
const struct option *options = ftrace_options;
const char * const ftrace_usage[] = {
"perf ftrace [<options>] [<command>]",
"perf ftrace [<options>] -- [<command>] [<options>]",
"perf ftrace {trace|latency} [<options>] [<command>]",
"perf ftrace {trace|latency} [<options>] -- [<command>] [<options>]",
NULL
};
enum perf_ftrace_subcommand subcmd = PERF_FTRACE_NONE;
INIT_LIST_HEAD(&ftrace.filters);
INIT_LIST_HEAD(&ftrace.notrace);
INIT_LIST_HEAD(&ftrace.graph_funcs);
INIT_LIST_HEAD(&ftrace.nograph_funcs);
signal(SIGINT, sig_handler);
signal(SIGUSR1, sig_handler);
signal(SIGCHLD, sig_handler);
signal(SIGPIPE, sig_handler);
ret = perf_config(perf_ftrace_config, &ftrace);
if (ret < 0)
return -1;
if (argc > 1) {
if (!strcmp(argv[1], "trace")) {
subcmd = PERF_FTRACE_TRACE;
} else if (!strcmp(argv[1], "latency")) {
subcmd = PERF_FTRACE_LATENCY;
options = latency_options;
}
if (subcmd != PERF_FTRACE_NONE) {
argc--;
argv++;
}
}
/* for backward compatibility */
if (subcmd == PERF_FTRACE_NONE)
subcmd = PERF_FTRACE_TRACE;
argc = parse_options(argc, argv, options, ftrace_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (argc < 0) {
ret = -EINVAL;
goto out_delete_filters;
}
/* Make system wide (-a) the default target. */
if (!argc && target__none(&ftrace.target))
ftrace.target.system_wide = true;
switch (subcmd) {
case PERF_FTRACE_TRACE:
cmd_func = __cmd_ftrace;
break;
case PERF_FTRACE_LATENCY:
if (list_empty(&ftrace.filters)) {
pr_err("Should provide a function to measure\n");
parse_options_usage(ftrace_usage, options, "T", 1);
ret = -EINVAL;
goto out_delete_filters;
}
cmd_func = __cmd_latency;
break;
case PERF_FTRACE_NONE:
default:
pr_err("Invalid subcommand\n");
ret = -EINVAL;
goto out_delete_filters;
}
ret = target__validate(&ftrace.target);
if (ret) {
char errbuf[512];
target__strerror(&ftrace.target, ret, errbuf, 512);
pr_err("%s\n", errbuf);
goto out_delete_filters;
}
ftrace.evlist = evlist__new();
if (ftrace.evlist == NULL) {
ret = -ENOMEM;
goto out_delete_filters;
}
ret = evlist__create_maps(ftrace.evlist, &ftrace.target);
if (ret < 0)
goto out_delete_evlist;
if (argc) {
ret = evlist__prepare_workload(ftrace.evlist, &ftrace.target,
argv, false,
ftrace__workload_exec_failed_signal);
if (ret < 0)
goto out_delete_evlist;
}
ret = cmd_func(&ftrace);
out_delete_evlist:
evlist__delete(ftrace.evlist);
out_delete_filters:
delete_filter_func(&ftrace.filters);
delete_filter_func(&ftrace.notrace);
delete_filter_func(&ftrace.graph_funcs);
delete_filter_func(&ftrace.nograph_funcs);
return ret;
}
| linux-master | tools/perf/builtin-ftrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-report.c
*
* Builtin report command: Analyze the perf.data input file,
* look up and read DSOs and symbol information and display
* a histogram of results, along various sorting keys.
*/
#include "builtin.h"
#include "util/config.h"
#include "util/annotate.h"
#include "util/color.h"
#include "util/dso.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include "util/map.h"
#include "util/symbol.h"
#include "util/map_symbol.h"
#include "util/mem-events.h"
#include "util/branch.h"
#include "util/callchain.h"
#include "util/values.h"
#include "perf.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evswitch.h"
#include "util/header.h"
#include "util/session.h"
#include "util/srcline.h"
#include "util/tool.h"
#include <subcmd/parse-options.h>
#include <subcmd/exec-cmd.h>
#include "util/parse-events.h"
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
#include "util/data.h"
#include "arch/common.h"
#include "util/time-utils.h"
#include "util/auxtrace.h"
#include "util/units.h"
#include "util/util.h" // perf_tip()
#include "ui/ui.h"
#include "ui/progress.h"
#include "util/block-info.h"
#include <dlfcn.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <linux/ctype.h>
#include <signal.h>
#include <linux/bitmap.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/mman.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
struct report {
struct perf_tool tool;
struct perf_session *session;
struct evswitch evswitch;
#ifdef HAVE_SLANG_SUPPORT
bool use_tui;
#endif
#ifdef HAVE_GTK2_SUPPORT
bool use_gtk;
#endif
bool use_stdio;
bool show_full_info;
bool show_threads;
bool inverted_callchain;
bool mem_mode;
bool stats_mode;
bool tasks_mode;
bool mmaps_mode;
bool header;
bool header_only;
bool nonany_branch_mode;
bool group_set;
bool stitch_lbr;
bool disable_order;
bool skip_empty;
int max_stack;
struct perf_read_values show_threads_values;
struct annotation_options annotation_opts;
const char *pretty_printing_style;
const char *cpu_list;
const char *symbol_filter_str;
const char *time_str;
struct perf_time_interval *ptime_range;
int range_size;
int range_num;
float min_percent;
u64 nr_entries;
u64 queue_size;
u64 total_cycles;
int socket_filter;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
struct branch_type_stat brtype_stat;
bool symbol_ipc;
bool total_cycles_mode;
struct block_report *block_reports;
int nr_block_reports;
};
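/*
 * perf_config() callback for 'perf report'.  Recognized keys and an
 * illustrative ~/.perfconfig stanza:
 *
 *   [report]
 *           group = true
 *           percent-limit = 0.5
 *           children = false
 *           queue-size = 100000
 *           sort_order = comm,dso
 *           skip-empty = true
 */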
static int report__config(const char *var, const char *value, void *cb)
{
struct report *rep = cb;
if (!strcmp(var, "report.group")) {
symbol_conf.event_group = perf_config_bool(var, value);
return 0;
}
if (!strcmp(var, "report.percent-limit")) {
double pcnt = strtof(value, NULL);
rep->min_percent = pcnt;
callchain_param.min_percent = pcnt;
return 0;
}
if (!strcmp(var, "report.children")) {
symbol_conf.cumulate_callchain = perf_config_bool(var, value);
return 0;
}
if (!strcmp(var, "report.queue-size"))
return perf_config_u64(&rep->queue_size, var, value);
if (!strcmp(var, "report.sort_order")) {
default_sort_order = strdup(value);
if (!default_sort_order) {
pr_err("Not enough memory for report.sort_order\n");
return -1;
}
return 0;
}
if (!strcmp(var, "report.skip-empty")) {
rep->skip_empty = perf_config_bool(var, value);
return 0;
}
pr_debug("%s variable unknown, ignoring...", var);
return 0;
}
static int hist_iter__report_callback(struct hist_entry_iter *iter,
struct addr_location *al, bool single,
void *arg)
{
int err = 0;
struct report *rep = arg;
struct hist_entry *he = iter->he;
struct evsel *evsel = iter->evsel;
struct perf_sample *sample = iter->sample;
struct mem_info *mi;
struct branch_info *bi;
if (!ui__has_annotation() && !rep->symbol_ipc)
return 0;
if (sort__mode == SORT_MODE__BRANCH) {
bi = he->branch_info;
err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
if (err)
goto out;
err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
} else if (rep->mem_mode) {
mi = he->mem_info;
err = addr_map_symbol__inc_samples(&mi->daddr, sample, evsel);
if (err)
goto out;
err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
} else if (symbol_conf.cumulate_callchain) {
if (single)
err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
} else {
err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
}
out:
return err;
}
static int hist_iter__branch_callback(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused,
bool single __maybe_unused,
void *arg)
{
struct hist_entry *he = iter->he;
struct report *rep = arg;
struct branch_info *bi = he->branch_info;
struct perf_sample *sample = iter->sample;
struct evsel *evsel = iter->evsel;
int err;
branch_type_count(&rep->brtype_stat, &bi->flags,
bi->from.addr, bi->to.addr);
if (!ui__has_annotation() && !rep->symbol_ipc)
return 0;
err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
if (err)
goto out;
err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
out:
return err;
}
static void setup_forced_leader(struct report *report,
struct evlist *evlist)
{
if (report->group_set)
evlist__force_leader(evlist);
}
static int process_feature_event(struct perf_session *session,
union perf_event *event)
{
struct report *rep = container_of(session->tool, struct report, tool);
if (event->feat.feat_id < HEADER_LAST_FEATURE)
return perf_event__process_feature(session, event);
if (event->feat.feat_id != HEADER_LAST_FEATURE) {
pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
event->feat.feat_id);
return -1;
} else if (rep->header_only) {
session_done = 1;
}
/*
* (feat_id = HEADER_LAST_FEATURE) is the end marker which
* means all features are received, now we can force the
* group if needed.
*/
setup_forced_leader(rep, session->evlist);
return 0;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct report *rep = container_of(tool, struct report, tool);
struct addr_location al;
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = sample,
.hide_unresolved = symbol_conf.hide_unresolved,
.add_entry_cb = hist_iter__report_callback,
};
int ret = 0;
if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
sample->time)) {
return 0;
}
if (evswitch__discard(&rep->evswitch, evsel))
return 0;
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
ret = -1;
goto out_put;
}
if (rep->stitch_lbr)
thread__set_lbr_stitch_enable(al.thread, true);
if (symbol_conf.hide_unresolved && al.sym == NULL)
goto out_put;
if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
goto out_put;
if (sort__mode == SORT_MODE__BRANCH) {
/*
* A non-synthesized event might not have a branch stack if
* branch stacks have been synthesized (using itrace options).
*/
if (!sample->branch_stack)
goto out_put;
iter.add_entry_cb = hist_iter__branch_callback;
iter.ops = &hist_iter_branch;
} else if (rep->mem_mode) {
iter.ops = &hist_iter_mem;
} else if (symbol_conf.cumulate_callchain) {
iter.ops = &hist_iter_cumulative;
} else {
iter.ops = &hist_iter_normal;
}
if (al.map != NULL)
map__dso(al.map)->hit = 1;
if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
hist__account_cycles(sample->branch_stack, &al, sample,
rep->nonany_branch_mode,
&rep->total_cycles);
}
ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
if (ret < 0)
pr_debug("problem adding hist entry, skipping event\n");
out_put:
addr_location__exit(&al);
return ret;
}
static int process_read_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct evsel *evsel,
struct machine *machine __maybe_unused)
{
struct report *rep = container_of(tool, struct report, tool);
if (rep->show_threads) {
const char *name = evsel__name(evsel);
int err = perf_read_values_add_value(&rep->show_threads_values,
event->read.pid, event->read.tid,
evsel->core.idx,
name,
event->read.value);
if (err)
return err;
}
return 0;
}
/* For pipe mode, sample_type is not currently set */
static int report__setup_sample_type(struct report *rep)
{
struct perf_session *session = rep->session;
u64 sample_type = evlist__combined_sample_type(session->evlist);
bool is_pipe = perf_data__is_pipe(session->data);
struct evsel *evsel;
if (session->itrace_synth_opts->callchain ||
session->itrace_synth_opts->add_callchain ||
(!is_pipe &&
perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
!session->itrace_synth_opts->set))
sample_type |= PERF_SAMPLE_CALLCHAIN;
if (session->itrace_synth_opts->last_branch ||
session->itrace_synth_opts->add_last_branch)
sample_type |= PERF_SAMPLE_BRANCH_STACK;
if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (perf_hpp_list.parent) {
ui__error("Selected --sort parent, but no "
"callchain data. Did you call "
"'perf record' without -g?\n");
return -EINVAL;
}
if (symbol_conf.use_callchain &&
!symbol_conf.show_branchflag_count) {
ui__error("Selected -g or --branch-history.\n"
"But no callchain or branch data.\n"
"Did you call 'perf record' without -g or -b?\n");
return -1;
}
} else if (!callchain_param.enabled &&
callchain_param.mode != CHAIN_NONE &&
!symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
if (callchain_register_param(&callchain_param) < 0) {
ui__error("Can't register callchain params.\n");
return -EINVAL;
}
}
if (symbol_conf.cumulate_callchain) {
/* Silently ignore if callchain is missing */
if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
symbol_conf.cumulate_callchain = false;
perf_hpp__cancel_cumulate();
}
}
if (sort__mode == SORT_MODE__BRANCH) {
if (!is_pipe &&
!(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
ui__error("Selected -b but no branch data. "
"Did you call perf record without -b?\n");
return -1;
}
}
if (sort__mode == SORT_MODE__MEMORY) {
/*
* FIXUP: prior to kernel 5.18, Arm SPE missed to set
* PERF_SAMPLE_DATA_SRC bit in sample type. For backward
* compatibility, set the bit if it's an old perf data file.
*/
evlist__for_each_entry(session->evlist, evsel) {
if (strstr(evsel->name, "arm_spe") &&
!(sample_type & PERF_SAMPLE_DATA_SRC)) {
evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
sample_type |= PERF_SAMPLE_DATA_SRC;
}
}
if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
ui__error("Selected --mem-mode but no mem data. "
"Did you call perf record without -d?\n");
return -1;
}
}
callchain_param_setup(sample_type, perf_env__arch(&rep->session->header.env));
if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
"Please apply --call-graph lbr when recording.\n");
rep->stitch_lbr = false;
}
/* ??? handle more cases than just ANY? */
if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY))
rep->nonany_branch_mode = true;
#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT)
if (dwarf_callchain_users) {
ui__warning("Please install libunwind or libdw "
"development packages during the perf build.\n");
}
#endif
return 0;
}
static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
}
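/*
 * Print the per-event header block, roughly (output sketch):
 *
 *   # Samples: 5K of event 'cycles'
 *   # Event count (approx.): 5123456789
 *   #
 *
 * Sample counts are scaled with convert_unit() (K/M/G); group events
 * accumulate the stats of all their members.
 */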
static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
const char *evname, FILE *fp)
{
size_t ret;
char unit;
unsigned long nr_samples = hists->stats.nr_samples;
u64 nr_events = hists->stats.total_period;
struct evsel *evsel = hists_to_evsel(hists);
char buf[512];
size_t size = sizeof(buf);
	int socket_id = hists->socket_filter;
if (quiet)
return 0;
if (symbol_conf.filter_relative) {
nr_samples = hists->stats.nr_non_filtered_samples;
nr_events = hists->stats.total_non_filtered_period;
}
if (evsel__is_group_event(evsel)) {
struct evsel *pos;
evsel__group_desc(evsel, buf, size);
evname = buf;
for_each_group_member(pos, evsel) {
const struct hists *pos_hists = evsel__hists(pos);
if (symbol_conf.filter_relative) {
nr_samples += pos_hists->stats.nr_non_filtered_samples;
nr_events += pos_hists->stats.total_non_filtered_period;
} else {
nr_samples += pos_hists->stats.nr_samples;
nr_events += pos_hists->stats.total_period;
}
}
}
nr_samples = convert_unit(nr_samples, &unit);
ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
if (evname != NULL) {
ret += fprintf(fp, " of event%s '%s'",
evsel->core.nr_members > 1 ? "s" : "", evname);
}
if (rep->time_str)
ret += fprintf(fp, " (time slices: %s)", rep->time_str);
if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
ret += fprintf(fp, ", show reference callgraph");
}
if (rep->mem_mode) {
ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order);
} else
ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
	if (socket_id > -1)
		ret += fprintf(fp, "\n# Processor Socket: %d", socket_id);
return ret + fprintf(fp, "\n#\n");
}
static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *rep)
{
struct evsel *pos;
int i = 0, ret;
evlist__for_each_entry(evlist, pos) {
ret = report__browse_block_hists(&rep->block_reports[i++].hist,
rep->min_percent, pos,
&rep->session->header.env,
&rep->annotation_opts);
if (ret != 0)
return ret;
}
return 0;
}
static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, const char *help)
{
struct evsel *pos;
int i = 0;
if (!quiet) {
fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
evlist->stats.total_lost_samples);
}
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
const char *evname = evsel__name(pos);
if (symbol_conf.event_group && !evsel__is_group_leader(pos))
continue;
if (rep->skip_empty && !hists->stats.nr_samples)
continue;
hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
if (rep->total_cycles_mode) {
report__browse_block_hists(&rep->block_reports[i++].hist,
rep->min_percent, pos,
NULL, NULL);
continue;
}
hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
!(symbol_conf.use_callchain ||
symbol_conf.show_branchflag_count));
fprintf(stdout, "\n\n");
}
if (!quiet)
fprintf(stdout, "#\n# (%s)\n#\n", help);
if (rep->show_threads) {
bool style = !strcmp(rep->pretty_printing_style, "raw");
perf_read_values_display(stdout, &rep->show_threads_values,
style);
perf_read_values_destroy(&rep->show_threads_values);
}
if (sort__mode == SORT_MODE__BRANCH)
branch_type_stat_display(stdout, &rep->brtype_stat);
return 0;
}
static void report__warn_kptr_restrict(const struct report *rep)
{
struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
if (evlist__exclude_kernel(rep->session->evlist))
return;
if (kernel_map == NULL ||
(map__dso(kernel_map)->hit &&
(kernel_kmap->ref_reloc_sym == NULL ||
kernel_kmap->ref_reloc_sym->addr == 0))) {
const char *desc =
"As no suitable kallsyms nor vmlinux was found, kernel samples\n"
"can't be resolved.";
if (kernel_map && map__has_symbols(kernel_map)) {
desc = "If some relocation was applied (e.g. "
"kexec) symbols may be misresolved.";
}
ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
"Samples in kernel modules can't be resolved as well.\n\n",
desc);
}
}
static int report__gtk_browse_hists(struct report *rep, const char *help)
{
int (*hist_browser)(struct evlist *evlist, const char *help,
struct hist_browser_timer *timer, float min_pcnt);
hist_browser = dlsym(perf_gtk_handle, "evlist__gtk_browse_hists");
if (hist_browser == NULL) {
ui__error("GTK browser not found!\n");
return -1;
}
return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
}
static int report__browse_hists(struct report *rep)
{
int ret;
struct perf_session *session = rep->session;
struct evlist *evlist = session->evlist;
char *help = NULL, *path = NULL;
path = system_path(TIPDIR);
if (perf_tip(&help, path) || help == NULL) {
/* fallback for people who don't install perf ;-) */
free(path);
path = system_path(DOCDIR);
if (perf_tip(&help, path) || help == NULL)
help = strdup("Cannot load tips.txt file, please install perf!");
}
free(path);
switch (use_browser) {
case 1:
if (rep->total_cycles_mode) {
ret = evlist__tui_block_hists_browse(evlist, rep);
break;
}
ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
&session->header.env, true, &rep->annotation_opts);
/*
* Usually "ret" is the last pressed key, and we only
* care if the key notifies us to switch data file.
*/
if (ret != K_SWITCH_INPUT_DATA && ret != K_RELOAD)
ret = 0;
break;
case 2:
ret = report__gtk_browse_hists(rep, help);
break;
default:
ret = evlist__tty_browse_hists(evlist, rep, help);
break;
}
free(help);
return ret;
}
static int report__collapse_hists(struct report *rep)
{
struct ui_progress prog;
struct evsel *pos;
int ret = 0;
ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
evlist__for_each_entry(rep->session->evlist, pos) {
struct hists *hists = evsel__hists(pos);
if (pos->core.idx == 0)
hists->symbol_filter_str = rep->symbol_filter_str;
hists->socket_filter = rep->socket_filter;
ret = hists__collapse_resort(hists, &prog);
if (ret < 0)
break;
/* Non-group events are considered as leader */
if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
struct hists *leader_hists = evsel__hists(evsel__leader(pos));
hists__match(leader_hists, hists);
hists__link(leader_hists, hists);
}
}
ui_progress__finish();
return ret;
}
static int hists__resort_cb(struct hist_entry *he, void *arg)
{
struct report *rep = arg;
struct symbol *sym = he->ms.sym;
if (rep->symbol_ipc && sym && !sym->annotate2) {
struct evsel *evsel = hists_to_evsel(he->hists);
symbol__annotate2(&he->ms, evsel, &rep->annotation_opts, NULL);
}
return 0;
}
static void report__output_resort(struct report *rep)
{
struct ui_progress prog;
struct evsel *pos;
ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
evlist__for_each_entry(rep->session->evlist, pos) {
evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep);
}
ui_progress__finish();
}
static int count_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct evsel *evsel,
struct machine *machine __maybe_unused)
{
struct hists *hists = evsel__hists(evsel);
hists__inc_nr_events(hists);
return 0;
}
static int count_lost_samples_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
struct report *rep = container_of(tool, struct report, tool);
struct evsel *evsel;
evsel = evlist__id2evsel(rep->session->evlist, sample->id);
if (evsel) {
hists__inc_nr_lost_samples(evsel__hists(evsel),
event->lost_samples.lost);
}
return 0;
}
static int process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist);
static void stats_setup(struct report *rep)
{
memset(&rep->tool, 0, sizeof(rep->tool));
rep->tool.attr = process_attr;
rep->tool.sample = count_sample_event;
rep->tool.lost_samples = count_lost_samples_event;
rep->tool.no_warn = true;
}
static int stats_print(struct report *rep)
{
struct perf_session *session = rep->session;
perf_session__fprintf_nr_events(session, stdout, rep->skip_empty);
evlist__fprintf_nr_events(session->evlist, stdout, rep->skip_empty);
return 0;
}
static void tasks_setup(struct report *rep)
{
memset(&rep->tool, 0, sizeof(rep->tool));
rep->tool.ordered_events = true;
if (rep->mmaps_mode) {
rep->tool.mmap = perf_event__process_mmap;
rep->tool.mmap2 = perf_event__process_mmap2;
}
rep->tool.attr = process_attr;
rep->tool.comm = perf_event__process_comm;
rep->tool.exit = perf_event__process_exit;
rep->tool.fork = perf_event__process_fork;
rep->tool.no_warn = true;
}
struct task {
struct thread *thread;
struct list_head list;
struct list_head children;
};
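/*
 * Link 'task' into its parent's children list, recursing up the ppid
 * chain.  Returns the topmost not-yet-listed ancestor (to be put on the
 * root list), NULL if the task was already linked, or ERR_PTR() when a
 * parent thread cannot be found.
 */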
static struct task *tasks_list(struct task *task, struct machine *machine)
{
struct thread *parent_thread, *thread = task->thread;
struct task *parent_task;
/* Already listed. */
if (!list_empty(&task->list))
return NULL;
/* Last one in the chain. */
if (thread__ppid(thread) == -1)
return task;
parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
if (!parent_thread)
return ERR_PTR(-ENOENT);
parent_task = thread__priv(parent_thread);
thread__put(parent_thread);
list_add_tail(&task->list, &parent_task->children);
return tasks_list(parent_task, machine);
}
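/*
 * Print one /proc/<pid>/maps-like line per map, roughly (illustrative):
 *
 *   <start>-<end> r-xp <pgoff> <ino> /usr/lib/libc.so.6
 */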
static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
{
size_t printed = 0;
struct map_rb_node *rb_node;
maps__for_each_entry(maps, rb_node) {
struct map *map = rb_node->map;
const struct dso *dso = map__dso(map);
u32 prot = map__prot(map);
printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
indent, "", map__start(map), map__end(map),
prot & PROT_READ ? 'r' : '-',
prot & PROT_WRITE ? 'w' : '-',
prot & PROT_EXEC ? 'x' : '-',
map__flags(map) ? 's' : 'p',
map__pgoff(map),
dso->id.ino, dso->name);
}
return printed;
}
static void task__print_level(struct task *task, FILE *fp, int level)
{
struct thread *thread = task->thread;
struct task *child;
int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
thread__pid(thread), thread__tid(thread),
thread__ppid(thread), level, "");
fprintf(fp, "%s\n", thread__comm_str(thread));
maps__fprintf_task(thread__maps(thread), comm_indent, fp);
if (!list_empty(&task->children)) {
list_for_each_entry(child, &task->children, list)
task__print_level(child, fp, level + 1);
}
}
static int tasks_print(struct report *rep, FILE *fp)
{
struct perf_session *session = rep->session;
struct machine *machine = &session->machines.host;
struct task *tasks, *task;
unsigned int nr = 0, itask = 0, i;
struct rb_node *nd;
LIST_HEAD(list);
/*
* No locking needed while accessing machine->threads,
	 * because --tasks is a single-threaded command.
*/
/* Count all the threads. */
for (i = 0; i < THREADS__TABLE_SIZE; i++)
nr += machine->threads[i].nr;
tasks = malloc(sizeof(*tasks) * nr);
if (!tasks)
return -ENOMEM;
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
for (nd = rb_first_cached(&threads->entries); nd;
nd = rb_next(nd)) {
task = tasks + itask++;
task->thread = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
INIT_LIST_HEAD(&task->children);
INIT_LIST_HEAD(&task->list);
thread__set_priv(task->thread, task);
}
}
/*
* Iterate every task down to the unprocessed parent
* and link all in task children list. Task with no
* parent is added into 'list'.
*/
for (itask = 0; itask < nr; itask++) {
task = tasks + itask;
if (!list_empty(&task->list))
continue;
task = tasks_list(task, machine);
if (IS_ERR(task)) {
pr_err("Error: failed to process tasks\n");
free(tasks);
return PTR_ERR(task);
}
if (task)
list_add_tail(&task->list, &list);
}
fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");
list_for_each_entry(task, &list, list)
task__print_level(task, fp, 0);
free(tasks);
return 0;
}
static int __cmd_report(struct report *rep)
{
int ret;
struct perf_session *session = rep->session;
struct evsel *pos;
struct perf_data *data = session->data;
signal(SIGINT, sig_handler);
if (rep->cpu_list) {
ret = perf_session__cpu_bitmap(session, rep->cpu_list,
rep->cpu_bitmap);
if (ret) {
ui__error("failed to set cpu bitmap\n");
return ret;
}
session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
}
if (rep->show_threads) {
ret = perf_read_values_init(&rep->show_threads_values);
if (ret)
return ret;
}
ret = report__setup_sample_type(rep);
if (ret) {
/* report__setup_sample_type() already showed error message */
return ret;
}
if (rep->stats_mode)
stats_setup(rep);
if (rep->tasks_mode)
tasks_setup(rep);
ret = perf_session__process_events(session);
if (ret) {
ui__error("failed to process sample\n");
return ret;
}
evlist__check_mem_load_aux(session->evlist);
if (rep->stats_mode)
return stats_print(rep);
if (rep->tasks_mode)
return tasks_print(rep, stdout);
report__warn_kptr_restrict(rep);
evlist__for_each_entry(session->evlist, pos)
rep->nr_entries += evsel__hists(pos)->nr_entries;
if (use_browser == 0) {
if (verbose > 3)
perf_session__fprintf(session, stdout);
if (verbose > 2)
perf_session__fprintf_dsos(session, stdout);
if (dump_trace) {
perf_session__fprintf_nr_events(session, stdout,
rep->skip_empty);
evlist__fprintf_nr_events(session->evlist, stdout,
rep->skip_empty);
return 0;
}
}
ret = report__collapse_hists(rep);
if (ret) {
ui__error("failed to process hist entry\n");
return ret;
}
if (session_done())
return 0;
/*
* recalculate number of entries after collapsing since it
* might be changed during the collapse phase.
*/
rep->nr_entries = 0;
evlist__for_each_entry(session->evlist, pos)
rep->nr_entries += evsel__hists(pos)->nr_entries;
if (rep->nr_entries == 0) {
ui__error("The %s data has no samples!\n", data->path);
return 0;
}
report__output_resort(rep);
if (rep->total_cycles_mode) {
int block_hpps[6] = {
PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT,
PERF_HPP_REPORT__BLOCK_LBR_CYCLES,
PERF_HPP_REPORT__BLOCK_CYCLES_PCT,
PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
PERF_HPP_REPORT__BLOCK_RANGE,
PERF_HPP_REPORT__BLOCK_DSO,
};
rep->block_reports = block_info__create_report(session->evlist,
rep->total_cycles,
block_hpps, 6,
&rep->nr_block_reports);
if (!rep->block_reports)
return -1;
}
return report__browse_hists(rep);
}
static int
report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
struct callchain_param *callchain = opt->value;
callchain->enabled = !unset;
/*
* --no-call-graph
*/
if (unset) {
symbol_conf.use_callchain = false;
callchain->mode = CHAIN_NONE;
return 0;
}
return parse_callchain_report_opt(arg);
}
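/*
 * Parse --time-quantum into nanoseconds, e.g. "10ms" => 10 * NSEC_PER_MSEC
 * (illustrative).  A bare number is taken as nanoseconds; 0 is rejected.
 */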
static int
parse_time_quantum(const struct option *opt, const char *arg,
int unset __maybe_unused)
{
unsigned long *time_q = opt->value;
char *end;
*time_q = strtoul(arg, &end, 0);
if (end == arg)
goto parse_err;
if (*time_q == 0) {
pr_err("time quantum cannot be 0");
return -1;
}
end = skip_spaces(end);
if (*end == 0)
return 0;
if (!strcmp(end, "s")) {
*time_q *= NSEC_PER_SEC;
return 0;
}
if (!strcmp(end, "ms")) {
*time_q *= NSEC_PER_MSEC;
return 0;
}
if (!strcmp(end, "us")) {
*time_q *= NSEC_PER_USEC;
return 0;
}
if (!strcmp(end, "ns"))
return 0;
parse_err:
pr_err("Cannot parse time quantum `%s'\n", arg);
return -1;
}
int
report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
{
if (arg) {
int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);
if (err) {
char buf[BUFSIZ];
regerror(err, &ignore_callees_regex, buf, sizeof(buf));
pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);
return -1;
}
have_ignore_callees = 1;
}
return 0;
}
static int
parse_branch_mode(const struct option *opt,
const char *str __maybe_unused, int unset)
{
int *branch_mode = opt->value;
*branch_mode = !unset;
return 0;
}
static int
parse_percent_limit(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct report *rep = opt->value;
double pcnt = strtof(str, NULL);
rep->min_percent = pcnt;
callchain_param.min_percent = pcnt;
return 0;
}
static int process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
u64 sample_type;
int err;
err = perf_event__process_attr(tool, event, pevlist);
if (err)
return err;
/*
* Check if we need to enable callchains based
* on events sample_type.
*/
sample_type = evlist__combined_sample_type(*pevlist);
callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env));
return 0;
}
int cmd_report(int argc, const char **argv)
{
struct perf_session *session;
struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
struct stat st;
bool has_br_stack = false;
int branch_mode = -1;
int last_key = 0;
bool branch_call_mode = false;
#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
CALLCHAIN_REPORT_HELP
"\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
const char * const report_usage[] = {
"perf report [<options>]",
NULL
};
struct report report = {
.tool = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.comm = perf_event__process_comm,
.namespaces = perf_event__process_namespaces,
.cgroup = perf_event__process_cgroup,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
.read = process_read_event,
.attr = process_attr,
#ifdef HAVE_LIBTRACEEVENT
.tracing_data = perf_event__process_tracing_data,
#endif
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.event_update = perf_event__process_event_update,
.feature = process_feature_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
.max_stack = PERF_MAX_STACK_DEPTH,
.pretty_printing_style = "normal",
.socket_filter = -1,
.skip_empty = true,
};
char *sort_order_help = sort_help("sort by key(s):");
char *field_order_help = sort_help("output field(s): overhead period sample ");
const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
"don't load vmlinux even if found"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
"file", "kallsyms pathname"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
OPT_BOOLEAN('T', "threads", &report.show_threads,
"Show per-thread event counters"),
OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
"pretty printing style key: normal raw"),
#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
#endif
#ifdef HAVE_GTK2_SUPPORT
OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
#endif
OPT_BOOLEAN(0, "stdio", &report.use_stdio,
"Use the stdio interface"),
OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
OPT_BOOLEAN(0, "header-only", &report.header_only,
"Show only data header."),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
sort_order_help),
OPT_STRING('F', "fields", &field_order, "key[,keys...]",
field_order_help),
OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
"Show sample percentage for different cpu modes"),
OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
"Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
OPT_STRING('p', "parent", &parent_pattern, "regex",
"regex filter to identify parent, see: '--sort parent'"),
OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
"Only display entries with parent-match"),
OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
"print_type,threshold[,print_limit],order,sort_key[,branch],value",
report_callchain_help, &report_parse_callchain_opt,
callchain_default_opt),
OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
"Accumulate callchains of children and show total overhead as well. "
"Enabled by default, use --no-children to disable."),
OPT_INTEGER(0, "max-stack", &report.max_stack,
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
"alias for inverted call graph"),
OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
"ignore callees of these functions in call graphs",
report_parse_ignore_callees_opt),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only consider symbols in these comms"),
OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
"only consider symbols in these pids"),
OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
"only consider symbols in these tids"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
"only show symbols that (partially) match with this filter"),
OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
"width[,width...]",
"don't try to adjust column width, use these fixed values"),
OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
"Only display entries resolved to a symbol"),
OPT_CALLBACK(0, "symfs", NULL, "directory",
"Look for files with symbols relative to this directory",
symbol__config_symfs),
OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
"list of cpus to profile"),
OPT_BOOLEAN('I', "show-info", &report.show_full_info,
"Display extended information about perf.data file"),
OPT_BOOLEAN(0, "source", &report.annotation_opts.annotate_src,
"Interleave source code with assembly code (default)"),
OPT_BOOLEAN(0, "asm-raw", &report.annotation_opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING(0, "prefix", &report.annotation_opts.prefix, "prefix",
"Add prefix to source file path names in programs (with --prefix-strip)"),
OPT_STRING(0, "prefix-strip", &report.annotation_opts.prefix_strip, "N",
"Strip first N entries of source file path name in programs (with --prefix)"),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
"Show event group information together"),
OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
"Sort the output by the event at the index n in group. "
"If n is invalid, sort by the first event. "
"WARNING: should be used on grouped events."),
OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
"use branch records for per branch histogram filling",
parse_branch_mode),
OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
"add last branch records to call history"),
OPT_STRING(0, "objdump", &objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_STRING(0, "addr2line", &addr2line_path, "path",
"addr2line binary to use for line numbers"),
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
"Disable symbol demangling"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
"Number of samples to save per histogram entry for individual browsing"),
OPT_CALLBACK(0, "percent-limit", &report, "percent",
"Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"how to display percentage of filtered entries", parse_filter_percentage),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
"Instruction Tracing options\n" ITRACE_HELP,
itrace_parse_synth_opts),
OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
"Show full source file name path for source lines"),
OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
"Show callgraph from reference event"),
OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr,
"Enable LBR callgraph stitching approach"),
OPT_INTEGER(0, "socket-filter", &report.socket_filter,
"only show processor socket that match with this filter"),
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
"Show raw trace event output (do not use print fmt or plugins)"),
OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
"Show entries in a hierarchy"),
OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
"'always' (default), 'never' or 'auto' only applicable to --stdio mode",
stdio__config_color, "always"),
OPT_STRING(0, "time", &report.time_str, "str",
"Time span of interest (start,stop)"),
OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
"Show inline function"),
OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
"Set time quantum for time sort key (default 100ms)",
parse_time_quantum),
OPTS_EVSWITCH(&report.evswitch),
OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
"Sort all blocks by 'Sampled Cycles%'"),
OPT_BOOLEAN(0, "disable-order", &report.disable_order,
"Disable raw trace ordering"),
OPT_BOOLEAN(0, "skip-empty", &report.skip_empty,
"Do not display empty (or dummy) events in the output"),
OPT_END()
};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
int ret = hists__init();
char sort_tmp[128];
if (ret < 0)
goto exit;
annotation_options__init(&report.annotation_opts);
ret = perf_config(report__config, &report);
if (ret)
goto exit;
argc = parse_options(argc, argv, options, report_usage, 0);
if (argc) {
/*
* Special case: if there's an argument left then assume that
* it's a symbol filter:
*/
if (argc > 1)
usage_with_options(report_usage, options);
report.symbol_filter_str = argv[0];
}
if (disassembler_style) {
report.annotation_opts.disassembler_style = strdup(disassembler_style);
if (!report.annotation_opts.disassembler_style)
return -ENOMEM;
}
if (objdump_path) {
report.annotation_opts.objdump_path = strdup(objdump_path);
if (!report.annotation_opts.objdump_path)
return -ENOMEM;
}
if (addr2line_path) {
symbol_conf.addr2line_path = strdup(addr2line_path);
if (!symbol_conf.addr2line_path)
return -ENOMEM;
}
if (annotate_check_args(&report.annotation_opts) < 0) {
ret = -EINVAL;
goto exit;
}
if (report.mmaps_mode)
report.tasks_mode = true;
if (dump_trace && report.disable_order)
report.tool.ordered_events = false;
if (quiet)
perf_quiet_option();
ret = symbol__validate_sym_arguments();
if (ret)
goto exit;
if (report.inverted_callchain)
callchain_param.order = ORDER_CALLER;
if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
callchain_param.order = ORDER_CALLER;
if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
(int)itrace_synth_opts.callchain_sz > report.max_stack)
report.max_stack = itrace_synth_opts.callchain_sz;
if (!input_name || !strlen(input_name)) {
if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
input_name = "-";
else
input_name = "perf.data";
}
data.path = input_name;
data.force = symbol_conf.force;
repeat:
session = perf_session__new(&data, &report.tool);
if (IS_ERR(session)) {
ret = PTR_ERR(session);
goto exit;
}
ret = evswitch__init(&report.evswitch, session->evlist, stderr);
if (ret)
goto exit;
if (zstd_init(&(session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
if (report.queue_size) {
ordered_events__set_alloc_size(&session->ordered_events,
report.queue_size);
}
session->itrace_synth_opts = &itrace_synth_opts;
report.session = session;
has_br_stack = perf_header__has_feat(&session->header,
HEADER_BRANCH_STACK);
if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
has_br_stack = false;
setup_forced_leader(&report, session->evlist);
if (symbol_conf.group_sort_idx && evlist__nr_groups(session->evlist) == 0) {
parse_options_usage(NULL, options, "group-sort-idx", 0);
ret = -EINVAL;
goto error;
}
if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch)
has_br_stack = true;
if (has_br_stack && branch_call_mode)
symbol_conf.show_branchflag_count = true;
memset(&report.brtype_stat, 0, sizeof(struct branch_type_stat));
/*
* Branch mode is a tristate:
* -1 means default, so decide based on the file having branch data.
* 0/1 means the user chose a mode.
*/
if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
!branch_call_mode) {
sort__mode = SORT_MODE__BRANCH;
symbol_conf.cumulate_callchain = false;
}
if (branch_call_mode) {
callchain_param.key = CCKEY_ADDRESS;
callchain_param.branch_callstack = true;
symbol_conf.use_callchain = true;
callchain_register_param(&callchain_param);
if (sort_order == NULL)
sort_order = "srcline,symbol,dso";
}
if (report.mem_mode) {
if (sort__mode == SORT_MODE__BRANCH) {
pr_err("branch and mem mode incompatible\n");
goto error;
}
sort__mode = SORT_MODE__MEMORY;
symbol_conf.cumulate_callchain = false;
}
if (symbol_conf.report_hierarchy) {
/* disable incompatible options */
symbol_conf.cumulate_callchain = false;
if (field_order) {
pr_err("Error: --hierarchy and --fields options cannot be used together\n");
parse_options_usage(report_usage, options, "F", 1);
parse_options_usage(NULL, options, "hierarchy", 0);
goto error;
}
perf_hpp_list.need_collapse = true;
}
if (report.use_stdio)
use_browser = 0;
#ifdef HAVE_SLANG_SUPPORT
else if (report.use_tui)
use_browser = 1;
#endif
#ifdef HAVE_GTK2_SUPPORT
else if (report.use_gtk)
use_browser = 2;
#endif
/* Force tty output for header output and per-thread stat. */
if (report.header || report.header_only || report.show_threads)
use_browser = 0;
if (report.header || report.header_only)
report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
if (report.show_full_info)
report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
if (report.stats_mode || report.tasks_mode)
use_browser = 0;
if (report.stats_mode && report.tasks_mode) {
pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");
goto error;
}
if (report.total_cycles_mode) {
if (sort__mode != SORT_MODE__BRANCH)
report.total_cycles_mode = false;
else
sort_order = NULL;
}
if (strcmp(input_name, "-") != 0)
setup_browser(true);
else
use_browser = 0;
if (sort_order && strstr(sort_order, "ipc")) {
parse_options_usage(report_usage, options, "s", 1);
goto error;
}
if (sort_order && strstr(sort_order, "symbol")) {
if (sort__mode == SORT_MODE__BRANCH) {
snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
sort_order, "ipc_lbr");
report.symbol_ipc = true;
} else {
snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
sort_order, "ipc_null");
}
sort_order = sort_tmp;
}
if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) &&
(setup_sorting(session->evlist) < 0)) {
if (sort_order)
parse_options_usage(report_usage, options, "s", 1);
if (field_order)
parse_options_usage(sort_order ? NULL : report_usage,
options, "F", 1);
goto error;
}
if ((report.header || report.header_only) && !quiet) {
perf_session__fprintf_info(session, stdout,
report.show_full_info);
if (report.header_only) {
if (data.is_pipe) {
/*
* we need to process the first few records,
* which contain PERF_RECORD_HEADER_FEATURE.
*/
perf_session__process_events(session);
}
ret = 0;
goto error;
}
} else if (use_browser == 0 && !quiet &&
!report.stats_mode && !report.tasks_mode) {
fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
stdout);
}
/*
* Only the TUI browser does integrated annotation, so don't
* allocate extra space that won't be used by the stdio
* implementation.
*/
if (ui__has_annotation() || report.symbol_ipc ||
report.total_cycles_mode) {
ret = symbol__annotation_init();
if (ret < 0)
goto error;
/*
* For searching by name on the "Browse map details" screen.
* Provide it only in verbose mode so as not to bloat
* struct symbol too much.
*/
if (verbose > 0) {
/*
* XXX: Need to provide a less kludgy way to ask for
* more space per symbol, the u32 is for the index on
* the ui browser.
* See symbol__browser_index.
*/
symbol_conf.priv_size += sizeof(u32);
}
annotation_config__init(&report.annotation_opts);
}
if (symbol__init(&session->header.env) < 0)
goto error;
if (report.time_str) {
ret = perf_time__parse_for_ranges(report.time_str, session,
&report.ptime_range,
&report.range_size,
&report.range_num);
if (ret < 0)
goto error;
itrace_synth_opts__set_time_range(&itrace_synth_opts,
report.ptime_range,
report.range_num);
}
#ifdef HAVE_LIBTRACEEVENT
if (session->tevent.pevent &&
tep_set_function_resolver(session->tevent.pevent,
machine__resolve_kernel_addr,
&session->machines.host) < 0) {
pr_err("%s: failed to set libtraceevent function resolver\n",
__func__);
return -1;
}
#endif
sort__setup_elide(stdout);
ret = __cmd_report(&report);
if (ret == K_SWITCH_INPUT_DATA || ret == K_RELOAD) {
perf_session__delete(session);
last_key = K_SWITCH_INPUT_DATA;
goto repeat;
} else
ret = 0;
error:
if (report.ptime_range) {
itrace_synth_opts__clear_time_range(&itrace_synth_opts);
zfree(&report.ptime_range);
}
if (report.block_reports) {
block_info__free_report(report.block_reports,
report.nr_block_reports);
report.block_reports = NULL;
}
zstd_fini(&(session->zstd_data));
perf_session__delete(session);
exit:
annotation_options__exit(&report.annotation_opts);
free(sort_order_help);
free(field_order_help);
return ret;
}
| linux-master | tools/perf/builtin-report.c |
/*
* perf.c
*
* Performance analysis utility.
*
* This is the main hub from which the sub-commands (perf stat,
* perf top, perf record, perf report, etc.) are started.
*/
#include "builtin.h"
#include "perf.h"
#include "util/build-id.h"
#include "util/cache.h"
#include "util/env.h"
#include <internal/lib.h> // page_size
#include <subcmd/exec-cmd.h>
#include "util/config.h"
#include <subcmd/run-command.h>
#include "util/parse-events.h"
#include <subcmd/parse-options.h>
#include "util/debug.h"
#include "util/event.h"
#include "util/util.h" // usage()
#include "ui/ui.h"
#include "perf-sys.h"
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <perf/core.h>
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
static int use_pager = -1;
struct cmd_struct {
const char *cmd;
int (*fn)(int, const char **);
int option;
};
static struct cmd_struct commands[] = {
{ "archive", NULL, 0 },
{ "buildid-cache", cmd_buildid_cache, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
{ "config", cmd_config, 0 },
{ "c2c", cmd_c2c, 0 },
{ "diff", cmd_diff, 0 },
{ "evlist", cmd_evlist, 0 },
{ "help", cmd_help, 0 },
{ "iostat", NULL, 0 },
{ "kallsyms", cmd_kallsyms, 0 },
{ "list", cmd_list, 0 },
{ "record", cmd_record, 0 },
{ "report", cmd_report, 0 },
{ "bench", cmd_bench, 0 },
{ "stat", cmd_stat, 0 },
#ifdef HAVE_LIBTRACEEVENT
{ "timechart", cmd_timechart, 0 },
#endif
{ "top", cmd_top, 0 },
{ "annotate", cmd_annotate, 0 },
{ "version", cmd_version, 0 },
{ "script", cmd_script, 0 },
#ifdef HAVE_LIBTRACEEVENT
{ "sched", cmd_sched, 0 },
#endif
#ifdef HAVE_LIBELF_SUPPORT
{ "probe", cmd_probe, 0 },
#endif
#ifdef HAVE_LIBTRACEEVENT
{ "kmem", cmd_kmem, 0 },
{ "lock", cmd_lock, 0 },
#endif
{ "kvm", cmd_kvm, 0 },
{ "test", cmd_test, 0 },
#if defined(HAVE_LIBTRACEEVENT) && (defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT))
{ "trace", cmd_trace, 0 },
#endif
{ "inject", cmd_inject, 0 },
{ "mem", cmd_mem, 0 },
{ "data", cmd_data, 0 },
{ "ftrace", cmd_ftrace, 0 },
{ "daemon", cmd_daemon, 0 },
#ifdef HAVE_LIBTRACEEVENT
{ "kwork", cmd_kwork, 0 },
#endif
};
struct pager_config {
const char *cmd;
int val;
};
static bool same_cmd_with_prefix(const char *var, struct pager_config *c,
const char *header)
{
return (strstarts(var, header) && !strcmp(var + strlen(header), c->cmd));
}
static int pager_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
if (same_cmd_with_prefix(var, c, "pager."))
c->val = perf_config_bool(var, value);
return 0;
}
/* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */
static int check_pager_config(const char *cmd)
{
int err;
struct pager_config c;
c.cmd = cmd;
c.val = -1;
err = perf_config(pager_command_config, &c);
return err ?: c.val;
}
static int browser_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
if (same_cmd_with_prefix(var, c, "tui."))
c->val = perf_config_bool(var, value);
if (same_cmd_with_prefix(var, c, "gtk."))
c->val = perf_config_bool(var, value) ? 2 : 0;
return 0;
}
/*
* returns 0 for "no tui", 1 for "use tui", 2 for "use gtk",
* and -1 for "not specified"
*/
static int check_browser_config(const char *cmd)
{
int err;
struct pager_config c;
c.cmd = cmd;
c.val = -1;
err = perf_config(browser_command_config, &c);
return err ?: c.val;
}
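/*
* Illustrative sketch (hypothetical config values): both helpers
* above resolve per-command keys from perfconfig. A ~/.perfconfig of
*
*   [tui]
*   report = off
*   [pager]
*   report = true
*
* makes check_browser_config("report") return 0 (no TUI) and
* check_pager_config("report") return 1 (use the pager); unset keys
* leave the -1 "not specified" default in place.
*/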
static void commit_pager_choice(void)
{
switch (use_pager) {
case 0:
setenv(PERF_PAGER_ENVIRONMENT, "cat", 1);
break;
case 1:
/* setup_pager(); */
break;
default:
break;
}
}
struct option options[] = {
OPT_ARGUMENT("help", "help"),
OPT_ARGUMENT("version", "version"),
OPT_ARGUMENT("exec-path", "exec-path"),
OPT_ARGUMENT("html-path", "html-path"),
OPT_ARGUMENT("paginate", "paginate"),
OPT_ARGUMENT("no-pager", "no-pager"),
OPT_ARGUMENT("debugfs-dir", "debugfs-dir"),
OPT_ARGUMENT("buildid-dir", "buildid-dir"),
OPT_ARGUMENT("list-cmds", "list-cmds"),
OPT_ARGUMENT("list-opts", "list-opts"),
OPT_ARGUMENT("debug", "debug"),
OPT_END()
};
static int handle_options(const char ***argv, int *argc, int *envchanged)
{
int handled = 0;
while (*argc > 0) {
const char *cmd = (*argv)[0];
if (cmd[0] != '-')
break;
/*
* For legacy reasons, the "version" and "help"
* commands can be written with "--" prepended
* to make them look like flags.
*/
if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version"))
break;
/*
* Shortcut for '-h' and '-v' options to invoke help
* and version command.
*/
if (!strcmp(cmd, "-h")) {
(*argv)[0] = "--help";
break;
}
if (!strcmp(cmd, "-v")) {
(*argv)[0] = "--version";
break;
}
if (!strcmp(cmd, "-vv")) {
(*argv)[0] = "version";
verbose = 1;
break;
}
/*
* Check remaining flags.
*/
if (strstarts(cmd, CMD_EXEC_PATH)) {
cmd += strlen(CMD_EXEC_PATH);
if (*cmd == '=')
set_argv_exec_path(cmd + 1);
else {
puts(get_argv_exec_path());
exit(0);
}
} else if (!strcmp(cmd, "--html-path")) {
puts(system_path(PERF_HTML_PATH));
exit(0);
} else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) {
use_pager = 1;
} else if (!strcmp(cmd, "--no-pager")) {
use_pager = 0;
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--debugfs-dir")) {
if (*argc < 2) {
fprintf(stderr, "No directory given for --debugfs-dir.\n");
usage(perf_usage_string);
}
tracing_path_set((*argv)[1]);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
} else if (!strcmp(cmd, "--buildid-dir")) {
if (*argc < 2) {
fprintf(stderr, "No directory given for --buildid-dir.\n");
usage(perf_usage_string);
}
set_buildid_dir((*argv)[1]);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
} else if (strstarts(cmd, CMD_DEBUGFS_DIR)) {
tracing_path_set(cmd + strlen(CMD_DEBUGFS_DIR));
fprintf(stderr, "dir: %s\n", tracing_path_mount());
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--list-cmds")) {
unsigned int i;
for (i = 0; i < ARRAY_SIZE(commands); i++) {
struct cmd_struct *p = commands+i;
printf("%s ", p->cmd);
}
putchar('\n');
exit(0);
} else if (!strcmp(cmd, "--list-opts")) {
unsigned int i;
for (i = 0; i < ARRAY_SIZE(options)-1; i++) {
struct option *p = options+i;
printf("--%s ", p->long_name);
}
putchar('\n');
exit(0);
} else if (!strcmp(cmd, "--debug")) {
if (*argc < 2) {
fprintf(stderr, "No variable specified for --debug.\n");
usage(perf_usage_string);
}
if (perf_debug_option((*argv)[1]))
usage(perf_usage_string);
(*argv)++;
(*argc)--;
} else {
fprintf(stderr, "Unknown option: %s\n", cmd);
usage(perf_usage_string);
}
(*argv)++;
(*argc)--;
handled++;
}
return handled;
}
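/*
* Usage sketch (hypothetical command line): for
*
*   perf --no-pager --buildid-dir /tmp/bid report -i perf.data
*
* the loop above consumes "--no-pager" plus "--buildid-dir" and its
* argument (three argv slots in total), stops at "report" because it
* does not start with '-', and returns 2, one per handled option.
*/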
#define RUN_SETUP (1<<0)
#define USE_PAGER (1<<1)
static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
{
int status;
struct stat st;
char sbuf[STRERR_BUFSIZE];
if (use_browser == -1)
use_browser = check_browser_config(p->cmd);
if (use_pager == -1 && p->option & RUN_SETUP)
use_pager = check_pager_config(p->cmd);
if (use_pager == -1 && p->option & USE_PAGER)
use_pager = 1;
commit_pager_choice();
perf_env__init(&perf_env);
perf_env__set_cmdline(&perf_env, argc, argv);
status = p->fn(argc, argv);
perf_config__exit();
exit_browser(status);
perf_env__exit(&perf_env);
if (status)
return status & 0xff;
/* Somebody closed stdout? */
if (fstat(fileno(stdout), &st))
return 0;
/* Ignore write errors for pipes and sockets.. */
if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode))
return 0;
status = 1;
/* Check for ENOSPC and EIO errors.. */
if (fflush(stdout)) {
fprintf(stderr, "write failure on standard output: %s",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out;
}
if (ferror(stdout)) {
fprintf(stderr, "unknown write failure on standard output");
goto out;
}
if (fclose(stdout)) {
fprintf(stderr, "close failed on standard output: %s",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out;
}
status = 0;
out:
return status;
}
static void handle_internal_command(int argc, const char **argv)
{
const char *cmd = argv[0];
unsigned int i;
/* Turn "perf cmd --help" into "perf help cmd" */
if (argc > 1 && !strcmp(argv[1], "--help")) {
argv[1] = argv[0];
argv[0] = cmd = "help";
}
for (i = 0; i < ARRAY_SIZE(commands); i++) {
struct cmd_struct *p = commands+i;
if (p->fn == NULL)
continue;
if (strcmp(p->cmd, cmd))
continue;
exit(run_builtin(p, argc, argv));
}
}
static void execv_dashed_external(const char **argv)
{
char *cmd;
const char *tmp;
int status;
if (asprintf(&cmd, "perf-%s", argv[0]) < 0)
goto do_die;
/*
* argv[0] must be the perf command, but the argv array
* belongs to the caller, and may be reused in
* subsequent loop iterations. Save argv[0] and
* restore it on error.
*/
tmp = argv[0];
argv[0] = cmd;
/*
* if we fail because the command is not found, it is
* OK to return. Otherwise, we just pass along the status code.
*/
status = run_command_v_opt(argv, 0);
if (status != -ERR_RUN_COMMAND_EXEC) {
if (IS_RUN_COMMAND_ERR(status)) {
do_die:
pr_err("FATAL: unable to run '%s'", argv[0]);
status = -128;
}
exit(-status);
}
errno = ENOENT; /* as if we called execvp */
argv[0] = tmp;
zfree(&cmd);
}
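/*
* Dispatch sketch: a sub-command that is not in commands[] falls
* through to this helper, so a (hypothetical) "perf foo bar" attempts
* to run an external "perf-foo" binary found via $PATH:
*
*   perf foo bar   ->   run_command_v_opt({"perf-foo", "bar"}, 0)
*
* If the exec fails because the binary does not exist, errno is left
* as ENOENT and the loop in main() can suggest a correction via
* help_unknown_cmd().
*/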
static int run_argv(int *argcp, const char ***argv)
{
/* See if it's an internal command */
handle_internal_command(*argcp, *argv);
/* .. then try the external ones */
execv_dashed_external(*argv);
return 0;
}
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
{
return veprintf(level, verbose, fmt, ap);
}
int main(int argc, const char **argv)
{
int err;
const char *cmd;
char sbuf[STRERR_BUFSIZE];
perf_debug_setup();
/* libsubcmd init */
exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
pager_init(PERF_PAGER_ENVIRONMENT);
libperf_init(libperf_print);
cmd = extract_argv0_path(argv[0]);
if (!cmd)
cmd = "perf-help";
srandom(time(NULL));
/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
config_exclusive_filename = getenv("PERF_CONFIG");
err = perf_config(perf_default_config, NULL);
if (err)
return err;
set_buildid_dir(NULL);
/*
* "perf-xxxx" is the same as "perf xxxx", but we obviously:
*
* - cannot take flags in between the "perf" and the "xxxx".
* - cannot execute it externally (since it would just do
* the same thing over again)
*
* So we just directly call the internal command handler. If that one
* fails to handle this, then maybe we just run a renamed perf binary
* that contains a dash in its name. To handle this scenario, we just
* fall through and ignore the "xxxx" part of the command string.
*/
if (strstarts(cmd, "perf-")) {
cmd += 5;
argv[0] = cmd;
handle_internal_command(argc, argv);
/*
* If the command is handled, the above function does not
* return. Undo the changes and fall through in such a case.
*/
cmd -= 5;
argv[0] = cmd;
}
if (strstarts(cmd, "trace")) {
#ifndef HAVE_LIBTRACEEVENT
fprintf(stderr,
"trace command not available: missing libtraceevent devel package at build time.\n");
goto out;
#elif !defined(HAVE_LIBAUDIT_SUPPORT) && !defined(HAVE_SYSCALL_TABLE_SUPPORT)
fprintf(stderr,
"trace command not available: missing audit-libs devel package at build time.\n");
goto out;
#else
setup_path();
argv[0] = "trace";
return cmd_trace(argc, argv);
#endif
}
/* Look for flags.. */
argv++;
argc--;
handle_options(&argv, &argc, NULL);
commit_pager_choice();
if (argc > 0) {
if (strstarts(argv[0], "--"))
argv[0] += 2;
} else {
/* The user didn't specify a command; give them help */
printf("\n usage: %s\n\n", perf_usage_string);
list_common_cmds_help();
printf("\n %s\n\n", perf_more_info_string);
goto out;
}
cmd = argv[0];
test_attr__init();
/*
* We use PATH to find perf commands, but we prepend some higher
* precedence paths: the "--exec-path" option, the PERF_EXEC_PATH
* environment, and the $(perfexecdir) from the Makefile at build
* time.
*/
setup_path();
/*
* Block SIGWINCH notifications so that the thread that wants them can
* unblock and get syscalls like select interrupted, instead of waiting
* forever while the signal goes to some other, uninterested thread.
*/
pthread__block_sigwinch();
while (1) {
static int done_help;
run_argv(&argc, &argv);
if (errno != ENOENT)
break;
if (!done_help) {
cmd = argv[0] = help_unknown_cmd(cmd);
done_help = 1;
} else
break;
}
fprintf(stderr, "Failed to run command '%s': %s\n",
cmd, str_error_r(errno, sbuf, sizeof(sbuf)));
out:
return 1;
}
| linux-master | tools/perf/perf.c |
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf-sys.h"
#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/util.h"
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <inttypes.h>
#include <errno.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>
#include <linux/ctype.h>
#define PR_SET_NAME 15 /* Set process name */
#define MAX_CPUS 4096
#define COMM_LEN 20
#define SYM_LEN 129
#define MAX_PID 1024000
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
struct sched_atom;
struct task_desc {
unsigned long nr;
unsigned long pid;
char comm[COMM_LEN];
unsigned long nr_events;
unsigned long curr_event;
struct sched_atom **atoms;
pthread_t thread;
sem_t sleep_sem;
sem_t ready_for_work;
sem_t work_done_sem;
u64 cpu_usage;
};
enum sched_event_type {
SCHED_EVENT_RUN,
SCHED_EVENT_SLEEP,
SCHED_EVENT_WAKEUP,
SCHED_EVENT_MIGRATION,
};
struct sched_atom {
enum sched_event_type type;
int specific_wait;
u64 timestamp;
u64 duration;
unsigned long nr;
sem_t *wait_sem;
struct task_desc *wakee;
};
#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
/* task state bitmask, copied from include/linux/sched.h */
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* in tsk->exit_state */
#define EXIT_DEAD 16
#define EXIT_ZOMBIE 32
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
enum thread_state {
THREAD_SLEEPING = 0,
THREAD_WAIT_CPU,
THREAD_SCHED_IN,
THREAD_IGNORE
};
struct work_atom {
struct list_head list;
enum thread_state state;
u64 sched_out_time;
u64 wake_up_time;
u64 sched_in_time;
u64 runtime;
};
struct work_atoms {
struct list_head work_list;
struct thread *thread;
struct rb_node node;
u64 max_lat;
u64 max_lat_start;
u64 max_lat_end;
u64 total_lat;
u64 nb_atoms;
u64 total_runtime;
int num_merged;
};
typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
struct perf_sched;
struct trace_sched_handler {
int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine);
int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine);
int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine);
/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
int (*fork_event)(struct perf_sched *sched, union perf_event *event,
struct machine *machine);
int (*migrate_task_event)(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine);
};
#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED
struct perf_sched_map {
DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
struct perf_cpu *comp_cpus;
bool comp;
struct perf_thread_map *color_pids;
const char *color_pids_str;
struct perf_cpu_map *color_cpus;
const char *color_cpus_str;
struct perf_cpu_map *cpus;
const char *cpus_str;
};
struct perf_sched {
struct perf_tool tool;
const char *sort_order;
unsigned long nr_tasks;
struct task_desc **pid_to_task;
struct task_desc **tasks;
const struct trace_sched_handler *tp_handler;
struct mutex start_work_mutex;
struct mutex work_done_wait_mutex;
int profile_cpu;
/*
* Track the current task - that way we can know whether there are any
* weird events, such as a task being switched away that is not current.
*/
struct perf_cpu max_cpu;
u32 *curr_pid;
struct thread **curr_thread;
char next_shortname1;
char next_shortname2;
unsigned int replay_repeat;
unsigned long nr_run_events;
unsigned long nr_sleep_events;
unsigned long nr_wakeup_events;
unsigned long nr_sleep_corrections;
unsigned long nr_run_events_optimized;
unsigned long targetless_wakeups;
unsigned long multitarget_wakeups;
unsigned long nr_runs;
unsigned long nr_timestamps;
unsigned long nr_unordered_timestamps;
unsigned long nr_context_switch_bugs;
unsigned long nr_events;
unsigned long nr_lost_chunks;
unsigned long nr_lost_events;
u64 run_measurement_overhead;
u64 sleep_measurement_overhead;
u64 start_time;
u64 cpu_usage;
u64 runavg_cpu_usage;
u64 parent_cpu_usage;
u64 runavg_parent_cpu_usage;
u64 sum_runtime;
u64 sum_fluct;
u64 run_avg;
u64 all_runtime;
u64 all_count;
u64 *cpu_last_switched;
struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
struct list_head sort_list, cmp_pid;
bool force;
bool skip_merge;
struct perf_sched_map map;
/* options for timehist command */
bool summary;
bool summary_only;
bool idle_hist;
bool show_callchain;
unsigned int max_stack;
bool show_cpu_visual;
bool show_wakeups;
bool show_next;
bool show_migrations;
bool show_state;
u64 skipped_samples;
const char *time_str;
struct perf_time_interval ptime;
struct perf_time_interval hist_time;
volatile bool thread_funcs_exit;
};
/* per thread run time data */
struct thread_runtime {
u64 last_time; /* time of previous sched in/out event */
u64 dt_run; /* run time */
u64 dt_sleep; /* time between CPU access by sleep (off cpu) */
u64 dt_iowait; /* time between CPU access by iowait (off cpu) */
u64 dt_preempt; /* time between CPU access by preempt (off cpu) */
u64 dt_delay; /* time between wakeup and sched-in */
u64 ready_to_run; /* time of wakeup */
struct stats run_stats;
u64 total_run_time;
u64 total_sleep_time;
u64 total_iowait_time;
u64 total_preempt_time;
u64 total_delay_time;
int last_state;
char shortname[3];
bool comm_changed;
u64 migrations;
};
/* per event run time data */
struct evsel_runtime {
u64 *last_time; /* time this event was last seen per cpu */
u32 ncpu; /* highest cpu slot allocated */
};
/* per cpu idle time data */
struct idle_thread_runtime {
struct thread_runtime tr;
struct thread *last_thread;
struct rb_root_cached sorted_root;
struct callchain_root callchain;
struct callchain_cursor cursor;
};
/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";
static u64 get_nsecs(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
u64 T0 = get_nsecs(), T1;
do {
T1 = get_nsecs();
} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}
static void sleep_nsecs(u64 nsecs)
{
struct timespec ts;
ts.tv_nsec = nsecs % NSEC_PER_SEC;
ts.tv_sec = nsecs / NSEC_PER_SEC;
nanosleep(&ts, NULL);
}
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
int i;
for (i = 0; i < 10; i++) {
T0 = get_nsecs();
burn_nsecs(sched, 0);
T1 = get_nsecs();
delta = T1-T0;
min_delta = min(min_delta, delta);
}
sched->run_measurement_overhead = min_delta;
printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
int i;
for (i = 0; i < 10; i++) {
T0 = get_nsecs();
sleep_nsecs(10000);
T1 = get_nsecs();
delta = T1-T0;
min_delta = min(min_delta, delta);
}
min_delta -= 10000;
sched->sleep_measurement_overhead = min_delta;
printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
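/*
* Worked example (made-up numbers): if the best of the ten timed
* sleep_nsecs(10000) calls takes 52000 ns, min_delta is 52000 and,
* after subtracting the requested 10000 ns, the recorded
* sleep_measurement_overhead is 42000 ns - the fixed cost of entering
* and leaving nanosleep(). burn_nsecs() compensates for
* run_measurement_overhead the same way while spinning.
*/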
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
struct sched_atom *event = zalloc(sizeof(*event));
unsigned long idx = task->nr_events;
size_t size;
event->timestamp = timestamp;
event->nr = idx;
task->nr_events++;
size = sizeof(struct sched_atom *) * task->nr_events;
task->atoms = realloc(task->atoms, size);
BUG_ON(!task->atoms);
task->atoms[idx] = event;
return event;
}
static struct sched_atom *last_event(struct task_desc *task)
{
if (!task->nr_events)
return NULL;
return task->atoms[task->nr_events - 1];
}
static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
u64 timestamp, u64 duration)
{
struct sched_atom *event, *curr_event = last_event(task);
/*
* optimize an existing RUN event by merging this one
* into it:
*/
if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
sched->nr_run_events_optimized++;
curr_event->duration += duration;
return;
}
event = get_new_event(task, timestamp);
event->type = SCHED_EVENT_RUN;
event->duration = duration;
sched->nr_run_events++;
}
static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
u64 timestamp, struct task_desc *wakee)
{
struct sched_atom *event, *wakee_event;
event = get_new_event(task, timestamp);
event->type = SCHED_EVENT_WAKEUP;
event->wakee = wakee;
wakee_event = last_event(wakee);
if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
sched->targetless_wakeups++;
return;
}
if (wakee_event->wait_sem) {
sched->multitarget_wakeups++;
return;
}
wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
sem_init(wakee_event->wait_sem, 0, 0);
wakee_event->specific_wait = 1;
event->wait_sem = wakee_event->wait_sem;
sched->nr_wakeup_events++;
}
static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
u64 timestamp, u64 task_state __maybe_unused)
{
struct sched_atom *event = get_new_event(task, timestamp);
event->type = SCHED_EVENT_SLEEP;
sched->nr_sleep_events++;
}
static struct task_desc *register_pid(struct perf_sched *sched,
unsigned long pid, const char *comm)
{
struct task_desc *task;
static int pid_max;
if (sched->pid_to_task == NULL) {
if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
pid_max = MAX_PID;
BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
}
if (pid >= (unsigned long)pid_max) {
BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
sizeof(struct task_desc *))) == NULL);
while (pid >= (unsigned long)pid_max)
sched->pid_to_task[pid_max++] = NULL;
}
task = sched->pid_to_task[pid];
if (task)
return task;
task = zalloc(sizeof(*task));
task->pid = pid;
task->nr = sched->nr_tasks;
strcpy(task->comm, comm);
/*
* every task starts in sleeping state - this gets ignored
* if there's no wakeup pointing to this sleep state:
*/
add_sched_event_sleep(sched, task, 0, 0);
sched->pid_to_task[pid] = task;
sched->nr_tasks++;
sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
BUG_ON(!sched->tasks);
sched->tasks[task->nr] = task;
if (verbose > 0)
printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
return task;
}
static void print_task_traces(struct perf_sched *sched)
{
struct task_desc *task;
unsigned long i;
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
task->nr, task->comm, task->pid, task->nr_events);
}
}
static void add_cross_task_wakeups(struct perf_sched *sched)
{
struct task_desc *task1, *task2;
unsigned long i, j;
for (i = 0; i < sched->nr_tasks; i++) {
task1 = sched->tasks[i];
j = i + 1;
if (j == sched->nr_tasks)
j = 0;
task2 = sched->tasks[j];
add_sched_event_wakeup(sched, task1, 0, task2);
}
}
static void perf_sched__process_event(struct perf_sched *sched,
struct sched_atom *atom)
{
int ret = 0;
switch (atom->type) {
case SCHED_EVENT_RUN:
burn_nsecs(sched, atom->duration);
break;
case SCHED_EVENT_SLEEP:
if (atom->wait_sem)
ret = sem_wait(atom->wait_sem);
BUG_ON(ret);
break;
case SCHED_EVENT_WAKEUP:
if (atom->wait_sem)
ret = sem_post(atom->wait_sem);
BUG_ON(ret);
break;
case SCHED_EVENT_MIGRATION:
break;
default:
BUG_ON(1);
}
}
static u64 get_cpu_usage_nsec_parent(void)
{
struct rusage ru;
u64 sum;
int err;
err = getrusage(RUSAGE_SELF, &ru);
BUG_ON(err);
sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;
return sum;
}
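/*
* Conversion example: for ru_utime = { 2 s, 500 us } and
* ru_stime = { 0 s, 250 us }, the helper above returns
* 2 * NSEC_PER_SEC + 500 * NSEC_PER_USEC + 250 * NSEC_PER_USEC
* = 2000750000 ns of combined user plus system time.
*/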
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
struct perf_event_attr attr;
char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
int fd;
struct rlimit limit;
bool need_privilege = false;
memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_TASK_CLOCK;
force_again:
fd = sys_perf_event_open(&attr, 0, -1, -1,
perf_event_open_cloexec_flag());
if (fd < 0) {
if (errno == EMFILE) {
if (sched->force) {
BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
limit.rlim_cur += sched->nr_tasks - cur_task;
if (limit.rlim_cur > limit.rlim_max) {
limit.rlim_max = limit.rlim_cur;
need_privilege = true;
}
if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
if (need_privilege && errno == EPERM)
strcpy(info, "Need privilege\n");
} else
goto force_again;
} else
strcpy(info, "Have a try with -f option\n");
}
pr_err("Error: sys_perf_event_open() syscall returned "
"with %d (%s)\n%s", fd,
str_error_r(errno, sbuf, sizeof(sbuf)), info);
exit(EXIT_FAILURE);
}
return fd;
}
static u64 get_cpu_usage_nsec_self(int fd)
{
u64 runtime;
int ret;
ret = read(fd, &runtime, sizeof(runtime));
BUG_ON(ret != sizeof(runtime));
return runtime;
}
struct sched_thread_parms {
struct task_desc *task;
struct perf_sched *sched;
int fd;
};
static void *thread_func(void *ctx)
{
struct sched_thread_parms *parms = ctx;
struct task_desc *this_task = parms->task;
struct perf_sched *sched = parms->sched;
u64 cpu_usage_0, cpu_usage_1;
unsigned long i, ret;
char comm2[22];
int fd = parms->fd;
zfree(&parms);
sprintf(comm2, ":%s", this_task->comm);
prctl(PR_SET_NAME, comm2);
if (fd < 0)
return NULL;
while (!sched->thread_funcs_exit) {
ret = sem_post(&this_task->ready_for_work);
BUG_ON(ret);
mutex_lock(&sched->start_work_mutex);
mutex_unlock(&sched->start_work_mutex);
cpu_usage_0 = get_cpu_usage_nsec_self(fd);
for (i = 0; i < this_task->nr_events; i++) {
this_task->curr_event = i;
perf_sched__process_event(sched, this_task->atoms[i]);
}
cpu_usage_1 = get_cpu_usage_nsec_self(fd);
this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
ret = sem_post(&this_task->work_done_sem);
BUG_ON(ret);
mutex_lock(&sched->work_done_wait_mutex);
mutex_unlock(&sched->work_done_wait_mutex);
}
return NULL;
}
static void create_tasks(struct perf_sched *sched)
EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
{
struct task_desc *task;
pthread_attr_t attr;
unsigned long i;
int err;
err = pthread_attr_init(&attr);
BUG_ON(err);
err = pthread_attr_setstacksize(&attr,
(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
BUG_ON(err);
mutex_lock(&sched->start_work_mutex);
mutex_lock(&sched->work_done_wait_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
struct sched_thread_parms *parms = malloc(sizeof(*parms));
BUG_ON(parms == NULL);
parms->task = task = sched->tasks[i];
parms->sched = sched;
parms->fd = self_open_counters(sched, i);
sem_init(&task->sleep_sem, 0, 0);
sem_init(&task->ready_for_work, 0, 0);
sem_init(&task->work_done_sem, 0, 0);
task->curr_event = 0;
err = pthread_create(&task->thread, &attr, thread_func, parms);
BUG_ON(err);
}
}
static void destroy_tasks(struct perf_sched *sched)
UNLOCK_FUNCTION(sched->start_work_mutex)
UNLOCK_FUNCTION(sched->work_done_wait_mutex)
{
struct task_desc *task;
unsigned long i;
int err;
mutex_unlock(&sched->start_work_mutex);
mutex_unlock(&sched->work_done_wait_mutex);
/* Get rid of threads so they won't be upset by mutex destruction */
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
err = pthread_join(task->thread, NULL);
BUG_ON(err);
sem_destroy(&task->sleep_sem);
sem_destroy(&task->ready_for_work);
sem_destroy(&task->work_done_sem);
}
}
static void wait_for_tasks(struct perf_sched *sched)
EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
u64 cpu_usage_0, cpu_usage_1;
struct task_desc *task;
unsigned long i, ret;
sched->start_time = get_nsecs();
sched->cpu_usage = 0;
mutex_unlock(&sched->work_done_wait_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
ret = sem_wait(&task->ready_for_work);
BUG_ON(ret);
sem_init(&task->ready_for_work, 0, 0);
}
mutex_lock(&sched->work_done_wait_mutex);
cpu_usage_0 = get_cpu_usage_nsec_parent();
mutex_unlock(&sched->start_work_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
ret = sem_wait(&task->work_done_sem);
BUG_ON(ret);
sem_init(&task->work_done_sem, 0, 0);
sched->cpu_usage += task->cpu_usage;
task->cpu_usage = 0;
}
cpu_usage_1 = get_cpu_usage_nsec_parent();
if (!sched->runavg_cpu_usage)
sched->runavg_cpu_usage = sched->cpu_usage;
sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
if (!sched->runavg_parent_cpu_usage)
sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
sched->parent_cpu_usage)/sched->replay_repeat;
mutex_lock(&sched->start_work_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
sem_init(&task->sleep_sem, 0, 0);
task->curr_event = 0;
}
}
static void run_one_test(struct perf_sched *sched)
EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
u64 T0, T1, delta, avg_delta, fluct;
T0 = get_nsecs();
wait_for_tasks(sched);
T1 = get_nsecs();
delta = T1 - T0;
sched->sum_runtime += delta;
sched->nr_runs++;
avg_delta = sched->sum_runtime / sched->nr_runs;
if (delta < avg_delta)
fluct = avg_delta - delta;
else
fluct = delta - avg_delta;
sched->sum_fluct += fluct;
if (!sched->run_avg)
sched->run_avg = delta;
sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
printf("cpu: %0.2f / %0.2f",
(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
#if 0
/*
* rusage statistics done by the parent, these are less
* accurate than the sched->sum_exec_runtime based statistics:
*/
printf(" [%0.2f / %0.2f]",
(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif
printf("\n");
if (sched->nr_sleep_corrections)
printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
sched->nr_sleep_corrections = 0;
}
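/*
* The smoothing above is an exponential moving average whose window
* is the replay repeat count r = sched->replay_repeat:
*
*   run_avg' = (run_avg * (r - 1) + delta) / r
*
* With hypothetical numbers r = 10, run_avg = 100 ms and a new delta
* of 120 ms, run_avg' = (100 * 9 + 120) / 10 = 102 ms, so a single
* outlier run moves the average by only 1/r of the difference.
* runavg_cpu_usage in wait_for_tasks() is smoothed the same way.
*/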
static void test_calibrations(struct perf_sched *sched)
{
u64 T0, T1;
T0 = get_nsecs();
burn_nsecs(sched, NSEC_PER_MSEC);
T1 = get_nsecs();
printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
T0 = get_nsecs();
sleep_nsecs(NSEC_PER_MSEC);
T1 = get_nsecs();
printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}
static int
replay_wakeup_event(struct perf_sched *sched,
struct evsel *evsel, struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
const char *comm = evsel__strval(evsel, sample, "comm");
const u32 pid = evsel__intval(evsel, sample, "pid");
struct task_desc *waker, *wakee;
if (verbose > 0) {
printf("sched_wakeup event %p\n", evsel);
printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
}
waker = register_pid(sched, sample->tid, "<unknown>");
wakee = register_pid(sched, pid, comm);
add_sched_event_wakeup(sched, waker, sample->time, wakee);
return 0;
}
static int replay_switch_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
*next_comm = evsel__strval(evsel, sample, "next_comm");
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
struct task_desc *prev, __maybe_unused *next;
u64 timestamp0, timestamp = sample->time;
int cpu = sample->cpu;
s64 delta;
if (verbose > 0)
printf("sched_switch event %p\n", evsel);
if (cpu >= MAX_CPUS || cpu < 0)
return 0;
timestamp0 = sched->cpu_last_switched[cpu];
if (timestamp0)
delta = timestamp - timestamp0;
else
delta = 0;
if (delta < 0) {
pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
return -1;
}
pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
prev_comm, prev_pid, next_comm, next_pid, delta);
prev = register_pid(sched, prev_pid, prev_comm);
next = register_pid(sched, next_pid, next_comm);
sched->cpu_last_switched[cpu] = timestamp;
add_sched_event_run(sched, prev, timestamp, delta);
add_sched_event_sleep(sched, prev, timestamp, prev_state);
return 0;
}
static int replay_fork_event(struct perf_sched *sched,
union perf_event *event,
struct machine *machine)
{
struct thread *child, *parent;
child = machine__findnew_thread(machine, event->fork.pid,
event->fork.tid);
parent = machine__findnew_thread(machine, event->fork.ppid,
event->fork.ptid);
if (child == NULL || parent == NULL) {
pr_debug("thread does not exist on fork event: child %p, parent %p\n",
child, parent);
goto out_put;
}
if (verbose > 0) {
printf("fork event\n");
printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent));
printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child));
}
register_pid(sched, thread__tid(parent), thread__comm_str(parent));
register_pid(sched, thread__tid(child), thread__comm_str(child));
out_put:
thread__put(child);
thread__put(parent);
return 0;
}
struct sort_dimension {
const char *name;
sort_fn_t cmp;
struct list_head list;
};
/*
* handle runtime stats saved per thread
*/
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
struct thread_runtime *r;
r = zalloc(sizeof(struct thread_runtime));
if (!r)
return NULL;
init_stats(&r->run_stats);
thread__set_priv(thread, r);
return r;
}
static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
struct thread_runtime *tr;
tr = thread__priv(thread);
if (tr == NULL) {
tr = thread__init_runtime(thread);
if (tr == NULL)
pr_debug("Failed to malloc memory for runtime data.\n");
}
return tr;
}
static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
struct sort_dimension *sort;
int ret = 0;
BUG_ON(list_empty(list));
list_for_each_entry(sort, list, list) {
ret = sort->cmp(l, r);
if (ret)
return ret;
}
return ret;
}
static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
struct list_head *sort_list)
{
struct rb_node *node = root->rb_root.rb_node;
struct work_atoms key = { .thread = thread };
while (node) {
struct work_atoms *atoms;
int cmp;
atoms = container_of(node, struct work_atoms, node);
cmp = thread_lat_cmp(sort_list, &key, atoms);
if (cmp > 0)
node = node->rb_left;
else if (cmp < 0)
node = node->rb_right;
else {
BUG_ON(thread != atoms->thread);
return atoms;
}
}
return NULL;
}
static void
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
struct list_head *sort_list)
{
struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
bool leftmost = true;
while (*new) {
struct work_atoms *this;
int cmp;
this = container_of(*new, struct work_atoms, node);
parent = *new;
cmp = thread_lat_cmp(sort_list, data, this);
if (cmp > 0)
new = &((*new)->rb_left);
else {
new = &((*new)->rb_right);
leftmost = false;
}
}
rb_link_node(&data->node, parent, new);
rb_insert_color_cached(&data->node, root, leftmost);
}
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
struct work_atoms *atoms = zalloc(sizeof(*atoms));
if (!atoms) {
pr_err("No memory at %s\n", __func__);
return -1;
}
atoms->thread = thread__get(thread);
INIT_LIST_HEAD(&atoms->work_list);
__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
return 0;
}
static char sched_out_state(u64 prev_state)
{
const char *str = TASK_STATE_TO_CHAR_STR;
return str[prev_state];
}
static int
add_sched_out_event(struct work_atoms *atoms,
char run_state,
u64 timestamp)
{
struct work_atom *atom = zalloc(sizeof(*atom));
if (!atom) {
pr_err("Non memory at %s", __func__);
return -1;
}
atom->sched_out_time = timestamp;
if (run_state == 'R') {
atom->state = THREAD_WAIT_CPU;
atom->wake_up_time = atom->sched_out_time;
}
list_add_tail(&atom->list, &atoms->work_list);
return 0;
}
static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
u64 timestamp __maybe_unused)
{
struct work_atom *atom;
BUG_ON(list_empty(&atoms->work_list));
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
atom->runtime += delta;
atoms->total_runtime += delta;
}
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
struct work_atom *atom;
u64 delta;
if (list_empty(&atoms->work_list))
return;
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
if (atom->state != THREAD_WAIT_CPU)
return;
if (timestamp < atom->wake_up_time) {
atom->state = THREAD_IGNORE;
return;
}
atom->state = THREAD_SCHED_IN;
atom->sched_in_time = timestamp;
delta = atom->sched_in_time - atom->wake_up_time;
atoms->total_lat += delta;
if (delta > atoms->max_lat) {
atoms->max_lat = delta;
atoms->max_lat_start = atom->wake_up_time;
atoms->max_lat_end = timestamp;
}
atoms->nb_atoms++;
}
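/*
* Atom lifecycle sketch (hypothetical timestamps in ns):
*
*   t=1000  sched_switch out, prev_state 'R'
*             -> add_sched_out_event(): state = THREAD_WAIT_CPU,
*                wake_up_time = sched_out_time = 1000
*   t=1800  sched_switch in
*             -> add_sched_in_event(): state = THREAD_SCHED_IN,
*                delta = 1800 - 1000 = 800 added to total_lat, and
*                max_lat and its start/end stamps updated if 800 is
*                a new maximum
*
* A task that slept ('S') instead gets its wake_up_time from a later
* sched_wakeup handled by latency_wakeup_event().
*/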
static int latency_switch_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
u64 timestamp0, timestamp = sample->time;
int cpu = sample->cpu, err = -1;
s64 delta;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
timestamp0 = sched->cpu_last_switched[cpu];
sched->cpu_last_switched[cpu] = timestamp;
if (timestamp0)
delta = timestamp - timestamp0;
else
delta = 0;
if (delta < 0) {
pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
return -1;
}
sched_out = machine__findnew_thread(machine, -1, prev_pid);
sched_in = machine__findnew_thread(machine, -1, next_pid);
if (sched_out == NULL || sched_in == NULL)
goto out_put;
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
if (thread_atoms_insert(sched, sched_out))
goto out_put;
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
pr_err("out-event: Internal tree error");
goto out_put;
}
}
if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
return -1;
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (!in_events) {
if (thread_atoms_insert(sched, sched_in))
goto out_put;
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (!in_events) {
pr_err("in-event: Internal tree error");
goto out_put;
}
/*
* A task came in that we have not heard about yet;
* add an initial atom in runnable state:
*/
if (add_sched_out_event(in_events, 'R', timestamp))
goto out_put;
}
add_sched_in_event(in_events, timestamp);
err = 0;
out_put:
thread__put(sched_out);
thread__put(sched_in);
return err;
}
static int latency_runtime_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
const u32 pid = evsel__intval(evsel, sample, "pid");
const u64 runtime = evsel__intval(evsel, sample, "runtime");
struct thread *thread = machine__findnew_thread(machine, -1, pid);
struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
u64 timestamp = sample->time;
int cpu = sample->cpu, err = -1;
if (thread == NULL)
return -1;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
if (!atoms) {
if (thread_atoms_insert(sched, thread))
goto out_put;
atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
if (!atoms) {
pr_err("in-event: Internal tree error");
goto out_put;
}
if (add_sched_out_event(atoms, 'R', timestamp))
goto out_put;
}
add_runtime_event(atoms, runtime, timestamp);
err = 0;
out_put:
thread__put(thread);
return err;
}
static int latency_wakeup_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
const u32 pid = evsel__intval(evsel, sample, "pid");
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *wakee;
u64 timestamp = sample->time;
int err = -1;
wakee = machine__findnew_thread(machine, -1, pid);
if (wakee == NULL)
return -1;
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
if (thread_atoms_insert(sched, wakee))
goto out_put;
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
pr_err("wakeup-event: Internal tree error");
goto out_put;
}
if (add_sched_out_event(atoms, 'S', timestamp))
goto out_put;
}
BUG_ON(list_empty(&atoms->work_list));
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
/*
* The wakeup event is not guaranteed to happen only when the
* task is off the run queue; it may also fire while the task
* is still on the run queue and merely changes ->state to
* TASK_RUNNING. In that case we must not set ->wake_up_time
* for a task that is already runnable.
*
* You WILL be missing events if you've recorded only
* one CPU, or are looking at only one, so don't
* skip in this case.
*/
if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
goto out_ok;
sched->nr_timestamps++;
if (atom->sched_out_time > timestamp) {
sched->nr_unordered_timestamps++;
goto out_ok;
}
atom->state = THREAD_WAIT_CPU;
atom->wake_up_time = timestamp;
out_ok:
err = 0;
out_put:
thread__put(wakee);
return err;
}
static int latency_migrate_task_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
const u32 pid = evsel__intval(evsel, sample, "pid");
u64 timestamp = sample->time;
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *migrant;
int err = -1;
/*
* Only need to worry about migration when profiling one CPU.
*/
if (sched->profile_cpu == -1)
return 0;
migrant = machine__findnew_thread(machine, -1, pid);
if (migrant == NULL)
return -1;
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
if (thread_atoms_insert(sched, migrant))
goto out_put;
register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
pr_err("migration-event: Internal tree error");
goto out_put;
}
if (add_sched_out_event(atoms, 'R', timestamp))
goto out_put;
}
BUG_ON(list_empty(&atoms->work_list));
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
sched->nr_timestamps++;
if (atom->sched_out_time > timestamp)
sched->nr_unordered_timestamps++;
err = 0;
out_put:
thread__put(migrant);
return err;
}
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
int i;
int ret;
u64 avg;
char max_lat_start[32], max_lat_end[32];
if (!work_list->nb_atoms)
return;
/*
* Ignore idle threads:
*/
if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
return;
sched->all_runtime += work_list->total_runtime;
sched->all_count += work_list->nb_atoms;
if (work_list->num_merged > 1) {
ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread),
work_list->num_merged);
} else {
ret = printf(" %s:%d ", thread__comm_str(work_list->thread),
thread__tid(work_list->thread));
}
for (i = 0; i < 24 - ret; i++)
printf(" ");
avg = work_list->total_lat / work_list->nb_atoms;
timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));
printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
(double)work_list->total_runtime / NSEC_PER_MSEC,
work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
(double)work_list->max_lat / NSEC_PER_MSEC,
max_lat_start, max_lat_end);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
pid_t l_tid, r_tid;
if (RC_CHK_ACCESS(l->thread) == RC_CHK_ACCESS(r->thread))
return 0;
l_tid = thread__tid(l->thread);
r_tid = thread__tid(r->thread);
if (l_tid < r_tid)
return -1;
if (l_tid > r_tid)
return 1;
return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread));
}
static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
u64 avgl, avgr;
if (!l->nb_atoms)
return -1;
if (!r->nb_atoms)
return 1;
avgl = l->total_lat / l->nb_atoms;
avgr = r->total_lat / r->nb_atoms;
if (avgl < avgr)
return -1;
if (avgl > avgr)
return 1;
return 0;
}
static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
if (l->max_lat < r->max_lat)
return -1;
if (l->max_lat > r->max_lat)
return 1;
return 0;
}
static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
if (l->nb_atoms < r->nb_atoms)
return -1;
if (l->nb_atoms > r->nb_atoms)
return 1;
return 0;
}
static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
if (l->total_runtime < r->total_runtime)
return -1;
if (l->total_runtime > r->total_runtime)
return 1;
return 0;
}
static int sort_dimension__add(const char *tok, struct list_head *list)
{
size_t i;
static struct sort_dimension avg_sort_dimension = {
.name = "avg",
.cmp = avg_cmp,
};
static struct sort_dimension max_sort_dimension = {
.name = "max",
.cmp = max_cmp,
};
static struct sort_dimension pid_sort_dimension = {
.name = "pid",
.cmp = pid_cmp,
};
static struct sort_dimension runtime_sort_dimension = {
.name = "runtime",
.cmp = runtime_cmp,
};
static struct sort_dimension switch_sort_dimension = {
.name = "switch",
.cmp = switch_cmp,
};
struct sort_dimension *available_sorts[] = {
&pid_sort_dimension,
&avg_sort_dimension,
&max_sort_dimension,
&switch_sort_dimension,
&runtime_sort_dimension,
};
for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
if (!strcmp(available_sorts[i]->name, tok)) {
list_add_tail(&available_sorts[i]->list, list);
return 0;
}
}
return -1;
}
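/*
* Usage sketch: parsing a (hypothetical) "-s max,runtime" sort string
* calls this helper once per token:
*
*   sort_dimension__add("max", &sched->sort_list)     -> max_cmp
*   sort_dimension__add("runtime", &sched->sort_list) -> runtime_cmp
*
* thread_lat_cmp() then applies the comparators in list order,
* falling through to the next key only on a tie.
*/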
static void perf_sched__sort_lat(struct perf_sched *sched)
{
struct rb_node *node;
struct rb_root_cached *root = &sched->atom_root;
again:
for (;;) {
struct work_atoms *data;
node = rb_first_cached(root);
if (!node)
break;
rb_erase_cached(node, root);
data = rb_entry(node, struct work_atoms, node);
__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
}
if (root == &sched->atom_root) {
root = &sched->merged_atom_root;
goto again;
}
}
static int process_sched_wakeup_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
if (sched->tp_handler->wakeup_event)
return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
return 0;
}
static int process_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
struct evsel *evsel __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
return 0;
}
union map_priv {
void *ptr;
bool color;
};
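/*
* Note on the union above: thread__priv() stores a single void
* pointer, and map mode needs only one bool per thread, so the bool
* is type-punned through the pointer slot instead of allocating
* per-thread state:
*
*   union map_priv priv = { .color = true };
*   thread__set_priv(thread, priv.ptr);
*
* thread__has_color() below performs the reverse conversion.
*/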
static bool thread__has_color(struct thread *thread)
{
union map_priv priv = {
.ptr = thread__priv(thread),
};
return priv.color;
}
static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
struct thread *thread = machine__findnew_thread(machine, pid, tid);
union map_priv priv = {
.color = false,
};
if (!sched->map.color_pids || !thread || thread__priv(thread))
return thread;
if (thread_map__has(sched->map.color_pids, tid))
priv.color = true;
thread__set_priv(thread, priv.ptr);
return thread;
}
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine)
{
const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
struct thread *sched_in;
struct thread_runtime *tr;
int new_shortname;
u64 timestamp0, timestamp = sample->time;
s64 delta;
int i;
struct perf_cpu this_cpu = {
.cpu = sample->cpu,
};
int cpus_nr;
bool new_cpu = false;
const char *color = PERF_COLOR_NORMAL;
char stimestamp[32];
BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
if (this_cpu.cpu > sched->max_cpu.cpu)
sched->max_cpu = this_cpu;
if (sched->map.comp) {
cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
sched->map.comp_cpus[cpus_nr++] = this_cpu;
new_cpu = true;
}
} else
cpus_nr = sched->max_cpu.cpu;
timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
sched->cpu_last_switched[this_cpu.cpu] = timestamp;
if (timestamp0)
delta = timestamp - timestamp0;
else
delta = 0;
if (delta < 0) {
pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
return -1;
}
sched_in = map__findnew_thread(sched, machine, -1, next_pid);
if (sched_in == NULL)
return -1;
tr = thread__get_runtime(sched_in);
if (tr == NULL) {
thread__put(sched_in);
return -1;
}
sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
printf(" ");
new_shortname = 0;
if (!tr->shortname[0]) {
if (!strcmp(thread__comm_str(sched_in), "swapper")) {
/*
* Don't allocate a letter-number for swapper:0
* as a shortname. Instead, we use '.' for it.
*/
tr->shortname[0] = '.';
tr->shortname[1] = ' ';
} else {
tr->shortname[0] = sched->next_shortname1;
tr->shortname[1] = sched->next_shortname2;
if (sched->next_shortname1 < 'Z') {
sched->next_shortname1++;
} else {
sched->next_shortname1 = 'A';
if (sched->next_shortname2 < '9')
sched->next_shortname2++;
else
sched->next_shortname2 = '0';
}
}
new_shortname = 1;
}
for (i = 0; i < cpus_nr; i++) {
struct perf_cpu cpu = {
.cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
};
struct thread *curr_thread = sched->curr_thread[cpu.cpu];
struct thread_runtime *curr_tr;
const char *pid_color = color;
const char *cpu_color = color;
if (curr_thread && thread__has_color(curr_thread))
pid_color = COLOR_PIDS;
if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, cpu))
continue;
if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
cpu_color = COLOR_CPUS;
if (cpu.cpu != this_cpu.cpu)
color_fprintf(stdout, color, " ");
else
color_fprintf(stdout, cpu_color, "*");
if (sched->curr_thread[cpu.cpu]) {
curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
if (curr_tr == NULL) {
thread__put(sched_in);
return -1;
}
color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
} else
color_fprintf(stdout, color, " ");
}
if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
goto out;
timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
color_fprintf(stdout, color, " %12s secs ", stimestamp);
if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) {
const char *pid_color = color;
if (thread__has_color(sched_in))
pid_color = COLOR_PIDS;
color_fprintf(stdout, pid_color, "%s => %s:%d",
tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in));
tr->comm_changed = false;
}
if (sched->map.comp && new_cpu)
color_fprintf(stdout, color, " (CPU %d)", this_cpu);
out:
color_fprintf(stdout, color, "\n");
thread__put(sched_in);
return 0;
}
static int process_sched_switch_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
int this_cpu = sample->cpu, err = 0;
u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
if (sched->curr_pid[this_cpu] != (u32)-1) {
/*
* Are we trying to switch away a PID that is
* not current?
*/
if (sched->curr_pid[this_cpu] != prev_pid)
sched->nr_context_switch_bugs++;
}
if (sched->tp_handler->switch_event)
err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
sched->curr_pid[this_cpu] = next_pid;
return err;
}
static int process_sched_runtime_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
if (sched->tp_handler->runtime_event)
return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
return 0;
}
static int perf_sched__process_fork_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
/* run the fork event through the perf machinery */
perf_event__process_fork(tool, event, sample, machine);
/* and then run additional processing needed for this command */
if (sched->tp_handler->fork_event)
return sched->tp_handler->fork_event(sched, event, machine);
return 0;
}
static int process_sched_migrate_task_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
if (sched->tp_handler->migrate_task_event)
return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
return 0;
}
typedef int (*tracepoint_handler)(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine);
static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
int err = 0;
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
err = f(tool, evsel, sample, machine);
}
return err;
}
static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct thread *thread;
struct thread_runtime *tr;
int err;
err = perf_event__process_comm(tool, event, sample, machine);
if (err)
return err;
thread = machine__find_thread(machine, sample->pid, sample->tid);
if (!thread) {
pr_err("Internal error: can't find thread\n");
return -1;
}
tr = thread__get_runtime(thread);
if (tr == NULL) {
thread__put(thread);
return -1;
}
tr->comm_changed = true;
thread__put(thread);
return 0;
}
static int perf_sched__read_events(struct perf_sched *sched)
{
struct evsel_str_handler handlers[] = {
{ "sched:sched_switch", process_sched_switch_event, },
{ "sched:sched_stat_runtime", process_sched_runtime_event, },
{ "sched:sched_wakeup", process_sched_wakeup_event, },
{ "sched:sched_waking", process_sched_wakeup_event, },
{ "sched:sched_wakeup_new", process_sched_wakeup_event, },
{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
};
struct perf_session *session;
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = sched->force,
};
int rc = -1;
session = perf_session__new(&data, &sched->tool);
if (IS_ERR(session)) {
pr_debug("Error creating perf session");
return PTR_ERR(session);
}
symbol__init(&session->header.env);
/* prefer sched_waking if it is captured */
if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
handlers[2].handler = process_sched_wakeup_ignore;
if (perf_session__set_tracepoints_handlers(session, handlers))
goto out_delete;
if (perf_session__has_traces(session, "record -R")) {
int err = perf_session__process_events(session);
if (err) {
pr_err("Failed to process events, error %d", err);
goto out_delete;
}
sched->nr_events = session->evlist->stats.nr_events[0];
sched->nr_lost_events = session->evlist->stats.total_lost;
sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
}
rc = 0;
out_delete:
perf_session__delete(session);
return rc;
}
/*
* scheduling times are printed as msec.usec
*/
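/*
 * Worked example (added for illustration): print_sched_time(1234567, 6)
 * splits 1234567 ns into 1 msec + 234 usec and prints "     1.234 "
 * (msecs right-aligned in a 6-character field).
 */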
static inline void print_sched_time(unsigned long long nsecs, int width)
{
unsigned long msecs;
unsigned long usecs;
msecs = nsecs / NSEC_PER_MSEC;
nsecs -= msecs * NSEC_PER_MSEC;
usecs = nsecs / NSEC_PER_USEC;
printf("%*lu.%03lu ", width, msecs, usecs);
}
/*
* returns runtime data for event, allocating memory for it the
* first time it is used.
*/
static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
{
struct evsel_runtime *r = evsel->priv;
if (r == NULL) {
r = zalloc(sizeof(struct evsel_runtime));
evsel->priv = r;
}
return r;
}
/*
* save last time event was seen per cpu
*/
static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
{
struct evsel_runtime *r = evsel__get_runtime(evsel);
if (r == NULL)
return;
if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
int i, n = __roundup_pow_of_two(cpu+1);
void *p = r->last_time;
p = realloc(r->last_time, n * sizeof(u64));
if (!p)
return;
r->last_time = p;
for (i = r->ncpu; i < n; ++i)
r->last_time[i] = (u64) 0;
r->ncpu = n;
}
r->last_time[cpu] = timestamp;
}
/* returns last time this event was seen on the given cpu */
static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
{
struct evsel_runtime *r = evsel__get_runtime(evsel);
if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
return 0;
return r->last_time[cpu];
}
static int comm_width = 30;
static char *timehist_get_commstr(struct thread *thread)
{
static char str[32];
const char *comm = thread__comm_str(thread);
pid_t tid = thread__tid(thread);
pid_t pid = thread__pid(thread);
int n;
if (pid == 0)
n = scnprintf(str, sizeof(str), "%s", comm);
else if (tid != pid)
n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
else
n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
if (n > comm_width)
comm_width = n;
return str;
}
static void timehist_header(struct perf_sched *sched)
{
u32 ncpus = sched->max_cpu.cpu + 1;
u32 i, j;
printf("%15s %6s ", "time", "cpu");
if (sched->show_cpu_visual) {
printf(" ");
for (i = 0, j = 0; i < ncpus; ++i) {
printf("%x", j++);
if (j > 15)
j = 0;
}
printf(" ");
}
printf(" %-*s %9s %9s %9s", comm_width,
"task name", "wait time", "sch delay", "run time");
if (sched->show_state)
printf(" %s", "state");
printf("\n");
/*
* units row
*/
printf("%15s %-6s ", "", "");
if (sched->show_cpu_visual)
printf(" %*s ", ncpus, "");
printf(" %-*s %9s %9s %9s", comm_width,
"[tid/pid]", "(msec)", "(msec)", "(msec)");
if (sched->show_state)
printf(" %5s", "");
printf("\n");
/*
* separator
*/
printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
if (sched->show_cpu_visual)
printf(" %.*s ", ncpus, graph_dotted_line);
printf(" %.*s %.9s %.9s %.9s", comm_width,
graph_dotted_line, graph_dotted_line, graph_dotted_line,
graph_dotted_line);
if (sched->show_state)
printf(" %.5s", graph_dotted_line);
printf("\n");
}
static char task_state_char(struct thread *thread, int state)
{
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
unsigned bit = state ? ffs(state) : 0;
/* 'I' for idle */
if (thread__tid(thread) == 0)
return 'I';
return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
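/*
 * Worked example (added; assumes the usual "RSDTt..." mapping): state 0x1
 * (TASK_INTERRUPTIBLE) gives ffs() == 1 -> 'S', state 0x2
 * (TASK_UNINTERRUPTIBLE) gives ffs() == 2 -> 'D', and state 0 maps to 'R'.
 */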
static void timehist_print_sample(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct addr_location *al,
struct thread *thread,
u64 t, int state)
{
struct thread_runtime *tr = thread__priv(thread);
const char *next_comm = evsel__strval(evsel, sample, "next_comm");
const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
u32 max_cpus = sched->max_cpu.cpu + 1;
char tstr[64];
char nstr[30];
u64 wait_time;
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return;
timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
printf("%15s [%04d] ", tstr, sample->cpu);
if (sched->show_cpu_visual) {
u32 i;
char c;
printf(" ");
for (i = 0; i < max_cpus; ++i) {
/* flag idle times with 'i'; others are sched events */
if (i == sample->cpu)
c = (thread__tid(thread) == 0) ? 'i' : 's';
else
c = ' ';
printf("%c", c);
}
printf(" ");
}
printf(" %-*s ", comm_width, timehist_get_commstr(thread));
wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
print_sched_time(wait_time, 6);
print_sched_time(tr->dt_delay, 6);
print_sched_time(tr->dt_run, 6);
if (sched->show_state)
printf(" %5c ", task_state_char(thread, state));
if (sched->show_next) {
snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
printf(" %-*s", comm_width, nstr);
}
if (sched->show_wakeups && !sched->show_next)
printf(" %-*s", comm_width, "");
if (thread__tid(thread) == 0)
goto out;
if (sched->show_callchain)
printf(" ");
sample__fprintf_sym(sample, al, 0,
EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
EVSEL__PRINT_CALLCHAIN_ARROW |
EVSEL__PRINT_SKIP_IGNORED,
get_tls_callchain_cursor(), symbol_conf.bt_stop_list, stdout);
out:
printf("\n");
}
/*
* Explanation of delta-time stats:
*
* t = time of current schedule out event
* tprev = time of previous sched out event
* also time of schedule-in event for current task
* last_time = time of last sched change event for current task
* (i.e, time process was last scheduled out)
* ready_to_run = time of wakeup for current task
*
* -----|------------|------------|------------|------
* last ready tprev t
* time to run
*
* |-------- dt_wait --------|
* |- dt_delay -|-- dt_run --|
*
* dt_run = run time of current task
* dt_wait = time between last schedule out event for task and tprev
* represents time spent off the cpu
* dt_delay = time between wakeup and schedule-in of task
*/
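/*
 * Worked example (added; hypothetical timestamps, in ns):
 *   last_time = 100, ready_to_run = 250, tprev = 400, t = 700
 *   dt_run   = t - tprev            = 300
 *   dt_delay = tprev - ready_to_run = 150
 *   dt_wait  = tprev - last_time    = 300, accounted to dt_preempt,
 *              dt_iowait or dt_sleep depending on last_state
 */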
static void timehist_update_runtime_stats(struct thread_runtime *r,
u64 t, u64 tprev)
{
r->dt_delay = 0;
r->dt_sleep = 0;
r->dt_iowait = 0;
r->dt_preempt = 0;
r->dt_run = 0;
if (tprev) {
r->dt_run = t - tprev;
if (r->ready_to_run) {
if (r->ready_to_run > tprev)
pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
else
r->dt_delay = tprev - r->ready_to_run;
}
if (r->last_time > tprev)
pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
else if (r->last_time) {
u64 dt_wait = tprev - r->last_time;
if (r->last_state == TASK_RUNNING)
r->dt_preempt = dt_wait;
else if (r->last_state == TASK_UNINTERRUPTIBLE)
r->dt_iowait = dt_wait;
else
r->dt_sleep = dt_wait;
}
}
update_stats(&r->run_stats, r->dt_run);
r->total_run_time += r->dt_run;
r->total_delay_time += r->dt_delay;
r->total_sleep_time += r->dt_sleep;
r->total_iowait_time += r->dt_iowait;
r->total_preempt_time += r->dt_preempt;
}
static bool is_idle_sample(struct perf_sample *sample,
struct evsel *evsel)
{
/* pid 0 == swapper == idle task */
if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
return evsel__intval(evsel, sample, "prev_pid") == 0;
return sample->pid == 0;
}
static void save_task_callchain(struct perf_sched *sched,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct callchain_cursor *cursor;
struct thread *thread;
/* want main thread for process - has maps */
thread = machine__findnew_thread(machine, sample->pid, sample->pid);
if (thread == NULL) {
pr_debug("Failed to get thread for pid %d.\n", sample->pid);
return;
}
if (!sched->show_callchain || sample->callchain == NULL)
return;
cursor = get_tls_callchain_cursor();
if (thread__resolve_callchain(thread, cursor, evsel, sample,
NULL, NULL, sched->max_stack + 2) != 0) {
if (verbose > 0)
pr_err("Failed to resolve callchain. Skipping\n");
return;
}
callchain_cursor_commit(cursor);
while (true) {
struct callchain_cursor_node *node;
struct symbol *sym;
node = callchain_cursor_current(cursor);
if (node == NULL)
break;
sym = node->ms.sym;
if (sym) {
if (!strcmp(sym->name, "schedule") ||
!strcmp(sym->name, "__schedule") ||
!strcmp(sym->name, "preempt_schedule"))
sym->ignore = 1;
}
callchain_cursor_advance(cursor);
}
}
static int init_idle_thread(struct thread *thread)
{
struct idle_thread_runtime *itr;
thread__set_comm(thread, idle_comm, 0);
itr = zalloc(sizeof(*itr));
if (itr == NULL)
return -ENOMEM;
init_stats(&itr->tr.run_stats);
callchain_init(&itr->callchain);
callchain_cursor_reset(&itr->cursor);
thread__set_priv(thread, itr);
return 0;
}
/*
* Track idle stats per cpu by maintaining a local thread
* struct for the idle task on each cpu.
*/
static int init_idle_threads(int ncpu)
{
int i, ret;
idle_threads = zalloc(ncpu * sizeof(struct thread *));
if (!idle_threads)
return -ENOMEM;
idle_max_cpu = ncpu;
/* allocate the actual thread struct if needed */
for (i = 0; i < ncpu; ++i) {
idle_threads[i] = thread__new(0, 0);
if (idle_threads[i] == NULL)
return -ENOMEM;
ret = init_idle_thread(idle_threads[i]);
if (ret < 0)
return ret;
}
return 0;
}
static void free_idle_threads(void)
{
int i;
if (idle_threads == NULL)
return;
for (i = 0; i < idle_max_cpu; ++i) {
		if (idle_threads[i])
thread__delete(idle_threads[i]);
}
free(idle_threads);
}
static struct thread *get_idle_thread(int cpu)
{
/*
* expand/allocate array of pointers to local thread
* structs if needed
*/
if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
int i, j = __roundup_pow_of_two(cpu+1);
void *p;
p = realloc(idle_threads, j * sizeof(struct thread *));
if (!p)
return NULL;
idle_threads = (struct thread **) p;
for (i = idle_max_cpu; i < j; ++i)
idle_threads[i] = NULL;
idle_max_cpu = j;
}
/* allocate a new thread struct if needed */
if (idle_threads[cpu] == NULL) {
idle_threads[cpu] = thread__new(0, 0);
if (idle_threads[cpu]) {
if (init_idle_thread(idle_threads[cpu]) < 0)
return NULL;
}
}
return idle_threads[cpu];
}
static void save_idle_callchain(struct perf_sched *sched,
struct idle_thread_runtime *itr,
struct perf_sample *sample)
{
struct callchain_cursor *cursor;
if (!sched->show_callchain || sample->callchain == NULL)
return;
cursor = get_tls_callchain_cursor();
if (cursor == NULL)
return;
callchain_cursor__copy(&itr->cursor, cursor);
}
static struct thread *timehist_get_thread(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine,
struct evsel *evsel)
{
struct thread *thread;
if (is_idle_sample(sample, evsel)) {
thread = get_idle_thread(sample->cpu);
if (thread == NULL)
pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
} else {
/* there were samples with tid 0 but non-zero pid */
thread = machine__findnew_thread(machine, sample->pid,
sample->tid ?: sample->pid);
if (thread == NULL) {
pr_debug("Failed to get thread for tid %d. skipping sample.\n",
sample->tid);
}
save_task_callchain(sched, sample, evsel, machine);
if (sched->idle_hist) {
struct thread *idle;
struct idle_thread_runtime *itr;
idle = get_idle_thread(sample->cpu);
if (idle == NULL) {
pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
return NULL;
}
itr = thread__priv(idle);
if (itr == NULL)
return NULL;
itr->last_thread = thread;
/* copy task callchain when entering to idle */
if (evsel__intval(evsel, sample, "next_pid") == 0)
save_idle_callchain(sched, itr, sample);
}
}
return thread;
}
static bool timehist_skip_sample(struct perf_sched *sched,
struct thread *thread,
struct evsel *evsel,
struct perf_sample *sample)
{
bool rc = false;
if (thread__is_filtered(thread)) {
rc = true;
sched->skipped_samples++;
}
if (sched->idle_hist) {
if (strcmp(evsel__name(evsel), "sched:sched_switch"))
rc = true;
else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
evsel__intval(evsel, sample, "next_pid") != 0)
rc = true;
}
return rc;
}
static void timehist_print_wakeup_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine,
struct thread *awakened)
{
struct thread *thread;
char tstr[64];
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL)
return;
/* show wakeup unless both awakee and awaker are filtered */
if (timehist_skip_sample(sched, thread, evsel, sample) &&
timehist_skip_sample(sched, awakened, evsel, sample)) {
return;
}
timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
printf("%15s [%04d] ", tstr, sample->cpu);
if (sched->show_cpu_visual)
printf(" %*s ", sched->max_cpu.cpu + 1, "");
printf(" %-*s ", comm_width, timehist_get_commstr(thread));
/* dt spacer */
printf(" %9s %9s %9s ", "", "", "");
printf("awakened: %s", timehist_get_commstr(awakened));
printf("\n");
}
static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct evsel *evsel __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
return 0;
}
static int timehist_sched_wakeup_event(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
struct thread *thread;
struct thread_runtime *tr = NULL;
/* want pid of awakened task not pid in sample */
const u32 pid = evsel__intval(evsel, sample, "pid");
thread = machine__findnew_thread(machine, 0, pid);
if (thread == NULL)
return -1;
tr = thread__get_runtime(thread);
if (tr == NULL)
return -1;
if (tr->ready_to_run == 0)
tr->ready_to_run = sample->time;
/* show wakeups if requested */
if (sched->show_wakeups &&
!perf_time__skip_sample(&sched->ptime, sample->time))
timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
return 0;
}
static void timehist_print_migration_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine,
struct thread *migrated)
{
struct thread *thread;
char tstr[64];
u32 max_cpus;
u32 ocpu, dcpu;
if (sched->summary_only)
return;
max_cpus = sched->max_cpu.cpu + 1;
ocpu = evsel__intval(evsel, sample, "orig_cpu");
dcpu = evsel__intval(evsel, sample, "dest_cpu");
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL)
return;
if (timehist_skip_sample(sched, thread, evsel, sample) &&
timehist_skip_sample(sched, migrated, evsel, sample)) {
return;
}
timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
printf("%15s [%04d] ", tstr, sample->cpu);
if (sched->show_cpu_visual) {
u32 i;
char c;
printf(" ");
for (i = 0; i < max_cpus; ++i) {
c = (i == sample->cpu) ? 'm' : ' ';
printf("%c", c);
}
printf(" ");
}
printf(" %-*s ", comm_width, timehist_get_commstr(thread));
/* dt spacer */
printf(" %9s %9s %9s ", "", "", "");
printf("migrated: %s", timehist_get_commstr(migrated));
printf(" cpu %d => %d", ocpu, dcpu);
printf("\n");
}
static int timehist_migrate_task_event(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
struct thread *thread;
struct thread_runtime *tr = NULL;
/* want pid of migrated task not pid in sample */
const u32 pid = evsel__intval(evsel, sample, "pid");
thread = machine__findnew_thread(machine, 0, pid);
if (thread == NULL)
return -1;
tr = thread__get_runtime(thread);
if (tr == NULL)
return -1;
tr->migrations++;
/* show migrations if requested */
timehist_print_migration_event(sched, evsel, sample, machine, thread);
return 0;
}
static int timehist_sched_change_event(struct perf_tool *tool,
union perf_event *event,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
struct perf_time_interval *ptime = &sched->ptime;
struct addr_location al;
struct thread *thread;
struct thread_runtime *tr = NULL;
u64 tprev, t = sample->time;
int rc = 0;
int state = evsel__intval(evsel, sample, "prev_state");
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0) {
pr_err("problem processing %d event. skipping it\n",
event->header.type);
rc = -1;
goto out;
}
thread = timehist_get_thread(sched, sample, machine, evsel);
if (thread == NULL) {
rc = -1;
goto out;
}
if (timehist_skip_sample(sched, thread, evsel, sample))
goto out;
tr = thread__get_runtime(thread);
if (tr == NULL) {
rc = -1;
goto out;
}
tprev = evsel__get_time(evsel, sample->cpu);
/*
* If start time given:
* - sample time is under window user cares about - skip sample
* - tprev is under window user cares about - reset to start of window
*/
if (ptime->start && ptime->start > t)
goto out;
if (tprev && ptime->start > tprev)
tprev = ptime->start;
/*
* If end time given:
* - previous sched event is out of window - we are done
* - sample time is beyond window user cares about - reset it
* to close out stats for time window interest
*/
if (ptime->end) {
if (tprev > ptime->end)
goto out;
if (t > ptime->end)
t = ptime->end;
}
if (!sched->idle_hist || thread__tid(thread) == 0) {
if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
timehist_update_runtime_stats(tr, t, tprev);
if (sched->idle_hist) {
struct idle_thread_runtime *itr = (void *)tr;
struct thread_runtime *last_tr;
BUG_ON(thread__tid(thread) != 0);
if (itr->last_thread == NULL)
goto out;
/* add current idle time as last thread's runtime */
last_tr = thread__get_runtime(itr->last_thread);
if (last_tr == NULL)
goto out;
timehist_update_runtime_stats(last_tr, t, tprev);
/*
* remove delta time of last thread as it's not updated
* and otherwise it will show an invalid value next
* time. we only care total run time and run stat.
*/
last_tr->dt_run = 0;
last_tr->dt_delay = 0;
last_tr->dt_sleep = 0;
last_tr->dt_iowait = 0;
last_tr->dt_preempt = 0;
if (itr->cursor.nr)
callchain_append(&itr->callchain, &itr->cursor, t - tprev);
itr->last_thread = NULL;
}
}
if (!sched->summary_only)
timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
out:
if (sched->hist_time.start == 0 && t >= ptime->start)
sched->hist_time.start = t;
if (ptime->end == 0 || t <= ptime->end)
sched->hist_time.end = t;
if (tr) {
/* time of this sched_switch event becomes last time task seen */
tr->last_time = sample->time;
/* last state is used to determine where to account wait time */
tr->last_state = state;
/* sched out event for task so reset ready to run time */
tr->ready_to_run = 0;
}
evsel__save_time(evsel, sample->time, sample->cpu);
addr_location__exit(&al);
return rc;
}
static int timehist_sched_switch_event(struct perf_tool *tool,
union perf_event *event,
struct evsel *evsel,
struct perf_sample *sample,
				       struct machine *machine)
{
return timehist_sched_change_event(tool, event, evsel, sample, machine);
}
static int process_lost(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
char tstr[64];
timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
printf("%15s ", tstr);
printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
return 0;
}
static void print_thread_runtime(struct thread *t,
struct thread_runtime *r)
{
double mean = avg_stats(&r->run_stats);
float stddev;
printf("%*s %5d %9" PRIu64 " ",
comm_width, timehist_get_commstr(t), thread__ppid(t),
(u64) r->run_stats.n);
print_sched_time(r->total_run_time, 8);
stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
print_sched_time(r->run_stats.min, 6);
printf(" ");
print_sched_time((u64) mean, 6);
printf(" ");
print_sched_time(r->run_stats.max, 6);
printf(" ");
printf("%5.2f", stddev);
printf(" %5" PRIu64, r->migrations);
printf("\n");
}
static void print_thread_waittime(struct thread *t,
struct thread_runtime *r)
{
printf("%*s %5d %9" PRIu64 " ",
comm_width, timehist_get_commstr(t), thread__ppid(t),
(u64) r->run_stats.n);
print_sched_time(r->total_run_time, 8);
print_sched_time(r->total_sleep_time, 6);
printf(" ");
print_sched_time(r->total_iowait_time, 6);
printf(" ");
print_sched_time(r->total_preempt_time, 6);
printf(" ");
print_sched_time(r->total_delay_time, 6);
printf("\n");
}
struct total_run_stats {
struct perf_sched *sched;
u64 sched_count;
u64 task_count;
u64 total_run_time;
};
static int show_thread_runtime(struct thread *t, void *priv)
{
struct total_run_stats *stats = priv;
struct thread_runtime *r;
if (thread__is_filtered(t))
return 0;
r = thread__priv(t);
if (r && r->run_stats.n) {
stats->task_count++;
stats->sched_count += r->run_stats.n;
stats->total_run_time += r->total_run_time;
if (stats->sched->show_state)
print_thread_waittime(t, r);
else
print_thread_runtime(t, r);
}
return 0;
}
static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
const char *sep = " <- ";
struct callchain_list *chain;
size_t ret = 0;
char bf[1024];
bool first;
if (node == NULL)
return 0;
ret = callchain__fprintf_folded(fp, node->parent);
first = (ret == 0);
list_for_each_entry(chain, &node->val, list) {
if (chain->ip >= PERF_CONTEXT_MAX)
continue;
if (chain->ms.sym && chain->ms.sym->ignore)
continue;
ret += fprintf(fp, "%s%s", first ? "" : sep,
callchain_list__sym_name(chain, bf, sizeof(bf),
false));
first = false;
}
return ret;
}
static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
{
size_t ret = 0;
FILE *fp = stdout;
struct callchain_node *chain;
struct rb_node *rb_node = rb_first_cached(root);
printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
graph_dotted_line);
while (rb_node) {
chain = rb_entry(rb_node, struct callchain_node, rb_node);
rb_node = rb_next(rb_node);
ret += fprintf(fp, " ");
print_sched_time(chain->hit, 12);
		ret += 16;	/* print_sched_time prints 2nd arg + 4 chars */
ret += fprintf(fp, " %8d ", chain->count);
ret += callchain__fprintf_folded(fp, chain);
ret += fprintf(fp, "\n");
}
return ret;
}
static void timehist_print_summary(struct perf_sched *sched,
struct perf_session *session)
{
struct machine *m = &session->machines.host;
struct total_run_stats totals;
u64 task_count;
struct thread *t;
struct thread_runtime *r;
int i;
u64 hist_time = sched->hist_time.end - sched->hist_time.start;
memset(&totals, 0, sizeof(totals));
totals.sched = sched;
if (sched->idle_hist) {
printf("\nIdle-time summary\n");
printf("%*s parent sched-out ", comm_width, "comm");
printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
} else if (sched->show_state) {
printf("\nWait-time summary\n");
printf("%*s parent sched-in ", comm_width, "comm");
printf(" run-time sleep iowait preempt delay\n");
} else {
printf("\nRuntime summary\n");
printf("%*s parent sched-in ", comm_width, "comm");
printf(" run-time min-run avg-run max-run stddev migrations\n");
}
printf("%*s (count) ", comm_width, "");
printf(" (msec) (msec) (msec) (msec) %s\n",
sched->show_state ? "(msec)" : "%");
printf("%.117s\n", graph_dotted_line);
machine__for_each_thread(m, show_thread_runtime, &totals);
task_count = totals.task_count;
if (!task_count)
printf("<no still running tasks>\n");
/* CPU idle stats not tracked when samples were skipped */
if (sched->skipped_samples && !sched->idle_hist)
return;
printf("\nIdle stats:\n");
for (i = 0; i < idle_max_cpu; ++i) {
if (cpu_list && !test_bit(i, cpu_bitmap))
continue;
t = idle_threads[i];
if (!t)
continue;
r = thread__priv(t);
if (r && r->run_stats.n) {
totals.sched_count += r->run_stats.n;
printf(" CPU %2d idle for ", i);
print_sched_time(r->total_run_time, 6);
printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
} else
printf(" CPU %2d idle entire time window\n", i);
}
if (sched->idle_hist && sched->show_callchain) {
callchain_param.mode = CHAIN_FOLDED;
callchain_param.value = CCVAL_PERIOD;
callchain_register_param(&callchain_param);
printf("\nIdle stats by callchain:\n");
for (i = 0; i < idle_max_cpu; ++i) {
struct idle_thread_runtime *itr;
t = idle_threads[i];
if (!t)
continue;
itr = thread__priv(t);
if (itr == NULL)
continue;
callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
0, &callchain_param);
printf(" CPU %2d:", i);
print_sched_time(itr->tr.total_run_time, 6);
printf(" msec\n");
timehist_print_idlehist_callchain(&itr->sorted_root);
printf("\n");
}
}
printf("\n"
" Total number of unique tasks: %" PRIu64 "\n"
"Total number of context switches: %" PRIu64 "\n",
totals.task_count, totals.sched_count);
printf(" Total run time (msec): ");
print_sched_time(totals.total_run_time, 2);
printf("\n");
printf(" Total scheduling time (msec): ");
print_sched_time(hist_time, 2);
printf(" (x %d)\n", sched->max_cpu.cpu);
}
typedef int (*sched_handler)(struct perf_tool *tool,
union perf_event *event,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine);
static int perf_timehist__process_sample(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
int err = 0;
struct perf_cpu this_cpu = {
.cpu = sample->cpu,
};
if (this_cpu.cpu > sched->max_cpu.cpu)
sched->max_cpu = this_cpu;
if (evsel->handler != NULL) {
sched_handler f = evsel->handler;
err = f(tool, event, evsel, sample, machine);
}
return err;
}
static int timehist_check_attr(struct perf_sched *sched,
struct evlist *evlist)
{
struct evsel *evsel;
struct evsel_runtime *er;
list_for_each_entry(evsel, &evlist->core.entries, core.node) {
er = evsel__get_runtime(evsel);
if (er == NULL) {
pr_err("Failed to allocate memory for evsel runtime data\n");
return -1;
}
if (sched->show_callchain && !evsel__has_callchain(evsel)) {
pr_info("Samples do not have callchains.\n");
sched->show_callchain = 0;
symbol_conf.use_callchain = 0;
}
}
return 0;
}
static int perf_sched__timehist(struct perf_sched *sched)
{
struct evsel_str_handler handlers[] = {
{ "sched:sched_switch", timehist_sched_switch_event, },
{ "sched:sched_wakeup", timehist_sched_wakeup_event, },
{ "sched:sched_waking", timehist_sched_wakeup_event, },
{ "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
};
const struct evsel_str_handler migrate_handlers[] = {
{ "sched:sched_migrate_task", timehist_migrate_task_event, },
};
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = sched->force,
};
struct perf_session *session;
struct evlist *evlist;
int err = -1;
/*
* event handlers for timehist option
*/
sched->tool.sample = perf_timehist__process_sample;
sched->tool.mmap = perf_event__process_mmap;
sched->tool.comm = perf_event__process_comm;
sched->tool.exit = perf_event__process_exit;
sched->tool.fork = perf_event__process_fork;
sched->tool.lost = process_lost;
sched->tool.attr = perf_event__process_attr;
sched->tool.tracing_data = perf_event__process_tracing_data;
sched->tool.build_id = perf_event__process_build_id;
sched->tool.ordered_events = true;
sched->tool.ordering_requires_timestamps = true;
symbol_conf.use_callchain = sched->show_callchain;
session = perf_session__new(&data, &sched->tool);
if (IS_ERR(session))
return PTR_ERR(session);
if (cpu_list) {
err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
if (err < 0)
goto out;
}
evlist = session->evlist;
symbol__init(&session->header.env);
	if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
		pr_err("Invalid time string\n");
		err = -EINVAL;
		goto out;
	}
if (timehist_check_attr(sched, evlist) != 0)
goto out;
setup_pager();
/* prefer sched_waking if it is captured */
if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
handlers[1].handler = timehist_sched_wakeup_ignore;
/* setup per-evsel handlers */
if (perf_session__set_tracepoints_handlers(session, handlers))
goto out;
/* sched_switch event at a minimum needs to exist */
if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
goto out;
}
if (sched->show_migrations &&
perf_session__set_tracepoints_handlers(session, migrate_handlers))
goto out;
/* pre-allocate struct for per-CPU idle stats */
sched->max_cpu.cpu = session->header.env.nr_cpus_online;
if (sched->max_cpu.cpu == 0)
sched->max_cpu.cpu = 4;
if (init_idle_threads(sched->max_cpu.cpu))
goto out;
/* summary_only implies summary option, but don't overwrite summary if set */
if (sched->summary_only)
sched->summary = sched->summary_only;
if (!sched->summary_only)
timehist_header(sched);
err = perf_session__process_events(session);
if (err) {
pr_err("Failed to process events, error %d", err);
goto out;
}
sched->nr_events = evlist->stats.nr_events[0];
sched->nr_lost_events = evlist->stats.total_lost;
sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
if (sched->summary)
timehist_print_summary(sched, session);
out:
free_idle_threads();
perf_session__delete(session);
return err;
}
static void print_bad_events(struct perf_sched *sched)
{
if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
sched->nr_unordered_timestamps, sched->nr_timestamps);
}
if (sched->nr_lost_events && sched->nr_events) {
printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
}
if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
sched->nr_context_switch_bugs, sched->nr_timestamps);
if (sched->nr_lost_events)
printf(" (due to lost events?)");
printf("\n");
}
}
static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
{
struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
struct work_atoms *this;
const char *comm = thread__comm_str(data->thread), *this_comm;
bool leftmost = true;
while (*new) {
int cmp;
this = container_of(*new, struct work_atoms, node);
parent = *new;
this_comm = thread__comm_str(this->thread);
cmp = strcmp(comm, this_comm);
if (cmp > 0) {
new = &((*new)->rb_left);
} else if (cmp < 0) {
new = &((*new)->rb_right);
leftmost = false;
} else {
this->num_merged++;
this->total_runtime += data->total_runtime;
this->nb_atoms += data->nb_atoms;
this->total_lat += data->total_lat;
list_splice(&data->work_list, &this->work_list);
if (this->max_lat < data->max_lat) {
this->max_lat = data->max_lat;
this->max_lat_start = data->max_lat_start;
this->max_lat_end = data->max_lat_end;
}
zfree(&data);
return;
}
}
data->num_merged++;
rb_link_node(&data->node, parent, new);
rb_insert_color_cached(&data->node, root, leftmost);
}
static void perf_sched__merge_lat(struct perf_sched *sched)
{
struct work_atoms *data;
struct rb_node *node;
if (sched->skip_merge)
return;
while ((node = rb_first_cached(&sched->atom_root))) {
rb_erase_cached(node, &sched->atom_root);
data = rb_entry(node, struct work_atoms, node);
__merge_work_atoms(&sched->merged_atom_root, data);
}
}
static int perf_sched__lat(struct perf_sched *sched)
{
struct rb_node *next;
setup_pager();
if (perf_sched__read_events(sched))
return -1;
perf_sched__merge_lat(sched);
perf_sched__sort_lat(sched);
printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
printf(" Task | Runtime ms | Switches | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
next = rb_first_cached(&sched->sorted_atom_root);
while (next) {
struct work_atoms *work_list;
work_list = rb_entry(next, struct work_atoms, node);
output_lat_thread(sched, work_list);
next = rb_next(next);
thread__zput(work_list->thread);
}
printf(" -----------------------------------------------------------------------------------------------------------------\n");
printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
printf(" ---------------------------------------------------\n");
print_bad_events(sched);
printf("\n");
return 0;
}
static int setup_map_cpus(struct perf_sched *sched)
{
struct perf_cpu_map *map;
sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
if (sched->map.comp) {
sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
if (!sched->map.comp_cpus)
return -1;
}
if (!sched->map.cpus_str)
return 0;
map = perf_cpu_map__new(sched->map.cpus_str);
if (!map) {
pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
return -1;
}
sched->map.cpus = map;
return 0;
}
static int setup_color_pids(struct perf_sched *sched)
{
struct perf_thread_map *map;
if (!sched->map.color_pids_str)
return 0;
map = thread_map__new_by_tid_str(sched->map.color_pids_str);
if (!map) {
pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
return -1;
}
sched->map.color_pids = map;
return 0;
}
static int setup_color_cpus(struct perf_sched *sched)
{
struct perf_cpu_map *map;
if (!sched->map.color_cpus_str)
return 0;
map = perf_cpu_map__new(sched->map.color_cpus_str);
if (!map) {
pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
return -1;
}
sched->map.color_cpus = map;
return 0;
}
static int perf_sched__map(struct perf_sched *sched)
{
if (setup_map_cpus(sched))
return -1;
if (setup_color_pids(sched))
return -1;
if (setup_color_cpus(sched))
return -1;
setup_pager();
if (perf_sched__read_events(sched))
return -1;
print_bad_events(sched);
return 0;
}
static int perf_sched__replay(struct perf_sched *sched)
{
unsigned long i;
calibrate_run_measurement_overhead(sched);
calibrate_sleep_measurement_overhead(sched);
test_calibrations(sched);
if (perf_sched__read_events(sched))
return -1;
printf("nr_run_events: %ld\n", sched->nr_run_events);
printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
if (sched->targetless_wakeups)
printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
if (sched->multitarget_wakeups)
printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
if (sched->nr_run_events_optimized)
printf("run atoms optimized: %ld\n",
sched->nr_run_events_optimized);
print_task_traces(sched);
add_cross_task_wakeups(sched);
sched->thread_funcs_exit = false;
create_tasks(sched);
printf("------------------------------------------------------------\n");
for (i = 0; i < sched->replay_repeat; i++)
run_one_test(sched);
sched->thread_funcs_exit = true;
destroy_tasks(sched);
return 0;
}
static void setup_sorting(struct perf_sched *sched, const struct option *options,
const char * const usage_msg[])
{
char *tmp, *tok, *str = strdup(sched->sort_order);
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
if (sort_dimension__add(tok, &sched->sort_list) < 0) {
usage_with_options_msg(usage_msg, options,
"Unknown --sort key: `%s'", tok);
}
}
free(str);
sort_dimension__add("pid", &sched->cmp_pid);
}
static bool schedstat_events_exposed(void)
{
/*
* Select "sched:sched_stat_wait" event to check
* whether schedstat tracepoints are exposed.
*/
	return !IS_ERR(trace_event__tp_format("sched", "sched_stat_wait"));
}
static int __cmd_record(int argc, const char **argv)
{
unsigned int rec_argc, i, j;
char **rec_argv;
const char **rec_argv_copy;
const char * const record_args[] = {
"record",
"-a",
"-R",
"-m", "1024",
"-c", "1",
"-e", "sched:sched_switch",
"-e", "sched:sched_stat_runtime",
"-e", "sched:sched_process_fork",
"-e", "sched:sched_wakeup_new",
"-e", "sched:sched_migrate_task",
};
/*
* The tracepoints trace_sched_stat_{wait, sleep, iowait}
* are not exposed to user if CONFIG_SCHEDSTATS is not set,
* to prevent "perf sched record" execution failure, determine
* whether to record schedstat events according to actual situation.
*/
const char * const schedstat_args[] = {
"-e", "sched:sched_stat_wait",
"-e", "sched:sched_stat_sleep",
"-e", "sched:sched_stat_iowait",
};
unsigned int schedstat_argc = schedstat_events_exposed() ?
ARRAY_SIZE(schedstat_args) : 0;
struct tep_event *waking_event;
int ret;
/*
* +2 for either "-e", "sched:sched_wakeup" or
* "-e", "sched:sched_waking"
*/
rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv_copy == NULL) {
free(rec_argv);
return -ENOMEM;
}
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
rec_argv[i++] = strdup("-e");
waking_event = trace_event__tp_format("sched", "sched_waking");
if (!IS_ERR(waking_event))
rec_argv[i++] = strdup("sched:sched_waking");
else
rec_argv[i++] = strdup("sched:sched_wakeup");
for (j = 0; j < schedstat_argc; j++)
rec_argv[i++] = strdup(schedstat_args[j]);
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = strdup(argv[j]);
BUG_ON(i != rec_argc);
memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
ret = cmd_record(rec_argc, rec_argv_copy);
for (i = 0; i < rec_argc; i++)
free(rec_argv[i]);
free(rec_argv);
free(rec_argv_copy);
return ret;
}
int cmd_sched(int argc, const char **argv)
{
static const char default_sort_order[] = "avg, max, switch, runtime";
struct perf_sched sched = {
.tool = {
.sample = perf_sched__process_tracepoint_sample,
.comm = perf_sched__process_comm,
.namespaces = perf_event__process_namespaces,
.lost = perf_event__process_lost,
.fork = perf_sched__process_fork_event,
.ordered_events = true,
},
.cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
.sort_list = LIST_HEAD_INIT(sched.sort_list),
.sort_order = default_sort_order,
.replay_repeat = 10,
.profile_cpu = -1,
.next_shortname1 = 'A',
.next_shortname2 = '0',
.skip_merge = 0,
.show_callchain = 1,
.max_stack = 5,
};
const struct option sched_options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
OPT_END()
};
const struct option latency_options[] = {
OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
"sort by key(s): runtime, switch, avg, max"),
OPT_INTEGER('C', "CPU", &sched.profile_cpu,
"CPU to profile on"),
OPT_BOOLEAN('p', "pids", &sched.skip_merge,
"latency stats per pid instead of per comm"),
OPT_PARENT(sched_options)
};
const struct option replay_options[] = {
OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
"repeat the workload replay N times (-1: infinite)"),
OPT_PARENT(sched_options)
};
const struct option map_options[] = {
OPT_BOOLEAN(0, "compact", &sched.map.comp,
"map output in compact mode"),
OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
"highlight given pids in map"),
OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
"highlight given CPUs in map"),
OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
"display given CPUs in map"),
OPT_PARENT(sched_options)
};
const struct option timehist_options[] = {
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
"file", "kallsyms pathname"),
OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
"Display call chains if present (default on)"),
OPT_UINTEGER(0, "max-stack", &sched.max_stack,
"Maximum number of functions to display backtrace."),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
OPT_BOOLEAN('s', "summary", &sched.summary_only,
"Show only syscall summary with statistics"),
OPT_BOOLEAN('S', "with-summary", &sched.summary,
"Show all syscalls and summary with statistics"),
OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
OPT_STRING(0, "time", &sched.time_str, "str",
"Time span for analysis (start,stop)"),
OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
"analyze events only for given process id(s)"),
OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
"analyze events only for given thread id(s)"),
OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
OPT_PARENT(sched_options)
};
const char * const latency_usage[] = {
"perf sched latency [<options>]",
NULL
};
const char * const replay_usage[] = {
"perf sched replay [<options>]",
NULL
};
const char * const map_usage[] = {
"perf sched map [<options>]",
NULL
};
const char * const timehist_usage[] = {
"perf sched timehist [<options>]",
NULL
};
const char *const sched_subcommands[] = { "record", "latency", "map",
"replay", "script",
"timehist", NULL };
const char *sched_usage[] = {
NULL,
NULL
};
struct trace_sched_handler lat_ops = {
.wakeup_event = latency_wakeup_event,
.switch_event = latency_switch_event,
.runtime_event = latency_runtime_event,
.migrate_task_event = latency_migrate_task_event,
};
struct trace_sched_handler map_ops = {
.switch_event = map_switch_event,
};
struct trace_sched_handler replay_ops = {
.wakeup_event = replay_wakeup_event,
.switch_event = replay_switch_event,
.fork_event = replay_fork_event,
};
unsigned int i;
int ret = 0;
mutex_init(&sched.start_work_mutex);
mutex_init(&sched.work_done_wait_mutex);
sched.curr_thread = calloc(MAX_CPUS, sizeof(*sched.curr_thread));
if (!sched.curr_thread) {
ret = -ENOMEM;
goto out;
}
sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched));
if (!sched.cpu_last_switched) {
ret = -ENOMEM;
goto out;
}
sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid));
if (!sched.curr_pid) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < MAX_CPUS; i++)
sched.curr_pid[i] = -1;
argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(sched_usage, sched_options);
/*
* Aliased to 'perf script' for now:
*/
if (!strcmp(argv[0], "script")) {
ret = cmd_script(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
ret = __cmd_record(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
sched.tp_handler = &lat_ops;
if (argc > 1) {
argc = parse_options(argc, argv, latency_options, latency_usage, 0);
if (argc)
usage_with_options(latency_usage, latency_options);
}
setup_sorting(&sched, latency_options, latency_usage);
ret = perf_sched__lat(&sched);
} else if (!strcmp(argv[0], "map")) {
if (argc) {
argc = parse_options(argc, argv, map_options, map_usage, 0);
if (argc)
usage_with_options(map_usage, map_options);
}
sched.tp_handler = &map_ops;
setup_sorting(&sched, latency_options, latency_usage);
ret = perf_sched__map(&sched);
} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
sched.tp_handler = &replay_ops;
if (argc) {
argc = parse_options(argc, argv, replay_options, replay_usage, 0);
if (argc)
usage_with_options(replay_usage, replay_options);
}
ret = perf_sched__replay(&sched);
} else if (!strcmp(argv[0], "timehist")) {
if (argc) {
argc = parse_options(argc, argv, timehist_options,
timehist_usage, 0);
if (argc)
usage_with_options(timehist_usage, timehist_options);
}
if ((sched.show_wakeups || sched.show_next) &&
sched.summary_only) {
pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
parse_options_usage(timehist_usage, timehist_options, "s", true);
if (sched.show_wakeups)
parse_options_usage(NULL, timehist_options, "w", true);
if (sched.show_next)
parse_options_usage(NULL, timehist_options, "n", true);
ret = -EINVAL;
goto out;
}
ret = symbol__validate_sym_arguments();
if (ret)
goto out;
ret = perf_sched__timehist(&sched);
} else {
usage_with_options(sched_usage, sched_options);
}
out:
free(sched.curr_pid);
free(sched.cpu_last_switched);
free(sched.curr_thread);
mutex_destroy(&sched.start_work_mutex);
mutex_destroy(&sched.work_done_wait_mutex);
return ret;
}
| linux-master | tools/perf/builtin-sched.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-list.c
*
* Builtin list command: list all event types
*
* Copyright (C) 2009, Thomas Gleixner <[email protected]>
* Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar <[email protected]>
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "builtin.h"
#include "util/print-events.h"
#include "util/pmus.h"
#include "util/pmu.h"
#include "util/debug.h"
#include "util/metricgroup.h"
#include "util/pfm.h"
#include "util/string2.h"
#include "util/strlist.h"
#include "util/strbuf.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <linux/zalloc.h>
#include <stdarg.h>
#include <stdio.h>
/**
* struct print_state - State and configuration passed to the default_print
* functions.
*/
struct print_state {
/**
* @pmu_glob: Optionally restrict PMU and metric matching to PMU or
* debugfs subsystem name.
*/
char *pmu_glob;
/** @event_glob: Optional pattern matching glob. */
char *event_glob;
/** @name_only: Print event or metric names only. */
bool name_only;
/** @desc: Print the event or metric description. */
bool desc;
/** @long_desc: Print longer event or metric description. */
bool long_desc;
/** @deprecated: Print deprecated events or metrics. */
bool deprecated;
/**
* @detailed: Print extra information on the perf event such as names
* and expressions used internally by events.
*/
bool detailed;
/** @metrics: Controls printing of metric and metric groups. */
bool metrics;
/** @metricgroups: Controls printing of metric and metric groups. */
bool metricgroups;
/** @last_topic: The last printed event topic. */
char *last_topic;
/** @last_metricgroups: The last printed metric group. */
char *last_metricgroups;
/** @visited_metrics: Metrics that are printed to avoid duplicates. */
struct strlist *visited_metrics;
};
static void default_print_start(void *ps)
{
struct print_state *print_state = ps;
if (!print_state->name_only && pager_in_use())
printf("\nList of pre-defined events (to be used in -e or -M):\n\n");
}
static void default_print_end(void *print_state __maybe_unused) {}
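/*
 * Summary comment (added): word-wrap @s to stdout. Output begins at column
 * @start, a line is wrapped once column @max would be exceeded, and each
 * continuation line is indented to column @start (@corr only adjusts the
 * internal column counter). A '\n' embedded in @s forces a wrap.
 */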
static void wordwrap(const char *s, int start, int max, int corr)
{
int column = start;
int n;
bool saw_newline = false;
while (*s) {
int wlen = strcspn(s, " \t\n");
if ((column + wlen >= max && column > start) || saw_newline) {
printf("\n%*s", start, "");
column = start + corr;
}
n = printf("%s%.*s", column > start ? " " : "", wlen, s);
if (n <= 0)
break;
saw_newline = s[wlen] == '\n';
s += wlen;
column += n;
s = skip_spaces(s);
}
}
static void default_print_event(void *ps, const char *pmu_name, const char *topic,
const char *event_name, const char *event_alias,
const char *scale_unit __maybe_unused,
bool deprecated, const char *event_type_desc,
const char *desc, const char *long_desc,
const char *encoding_desc)
{
struct print_state *print_state = ps;
int pos;
if (deprecated && !print_state->deprecated)
return;
if (print_state->pmu_glob && pmu_name && !strglobmatch(pmu_name, print_state->pmu_glob))
return;
if (print_state->event_glob &&
(!event_name || !strglobmatch(event_name, print_state->event_glob)) &&
(!event_alias || !strglobmatch(event_alias, print_state->event_glob)) &&
(!topic || !strglobmatch_nocase(topic, print_state->event_glob)))
return;
if (print_state->name_only) {
if (event_alias && strlen(event_alias))
printf("%s ", event_alias);
else
printf("%s ", event_name);
return;
}
	if (print_state->last_topic == NULL ||
	    strcmp(print_state->last_topic, topic ?: "")) {
if (topic)
printf("\n%s:\n", topic);
zfree(&print_state->last_topic);
print_state->last_topic = strdup(topic ?: "");
}
if (event_alias && strlen(event_alias))
pos = printf(" %s OR %s", event_name, event_alias);
else
pos = printf(" %s", event_name);
if (!topic && event_type_desc) {
for (; pos < 53; pos++)
putchar(' ');
printf("[%s]\n", event_type_desc);
} else
putchar('\n');
if (desc && print_state->desc) {
char *desc_with_unit = NULL;
int desc_len = -1;
if (pmu_name && strcmp(pmu_name, "default_core")) {
desc_len = strlen(desc);
desc_len = asprintf(&desc_with_unit,
desc[desc_len - 1] != '.'
? "%s. Unit: %s" : "%s Unit: %s",
desc, pmu_name);
}
printf("%*s", 8, "[");
wordwrap(desc_len > 0 ? desc_with_unit : desc, 8, pager_get_columns(), 0);
printf("]\n");
free(desc_with_unit);
}
long_desc = long_desc ?: desc;
if (long_desc && print_state->long_desc) {
printf("%*s", 8, "[");
wordwrap(long_desc, 8, pager_get_columns(), 0);
printf("]\n");
}
if (print_state->detailed && encoding_desc) {
printf("%*s", 8, "");
wordwrap(encoding_desc, 8, pager_get_columns(), 0);
putchar('\n');
}
}
static void default_print_metric(void *ps,
const char *group,
const char *name,
const char *desc,
const char *long_desc,
const char *expr,
const char *threshold,
const char *unit __maybe_unused)
{
struct print_state *print_state = ps;
if (print_state->event_glob &&
(!print_state->metrics || !name || !strglobmatch(name, print_state->event_glob)) &&
(!print_state->metricgroups || !group || !strglobmatch(group, print_state->event_glob)))
return;
if (!print_state->name_only && !print_state->last_metricgroups) {
if (print_state->metricgroups) {
printf("\nMetric Groups:\n");
if (!print_state->metrics)
putchar('\n');
} else {
printf("\nMetrics:\n\n");
}
}
if (!print_state->last_metricgroups ||
strcmp(print_state->last_metricgroups, group ?: "")) {
if (group && print_state->metricgroups) {
if (print_state->name_only)
printf("%s ", group);
else if (print_state->metrics) {
const char *gdesc = describe_metricgroup(group);
if (gdesc)
printf("\n%s: [%s]\n", group, gdesc);
else
printf("\n%s:\n", group);
} else
printf("%s\n", group);
}
zfree(&print_state->last_metricgroups);
print_state->last_metricgroups = strdup(group ?: "");
}
if (!print_state->metrics)
return;
if (print_state->name_only) {
if (print_state->metrics &&
!strlist__has_entry(print_state->visited_metrics, name)) {
printf("%s ", name);
strlist__add(print_state->visited_metrics, name);
}
return;
}
printf(" %s\n", name);
if (desc && print_state->desc) {
printf("%*s", 8, "[");
wordwrap(desc, 8, pager_get_columns(), 0);
printf("]\n");
}
if (long_desc && print_state->long_desc) {
printf("%*s", 8, "[");
wordwrap(long_desc, 8, pager_get_columns(), 0);
printf("]\n");
}
if (expr && print_state->detailed) {
printf("%*s", 8, "[");
wordwrap(expr, 8, pager_get_columns(), 0);
printf("]\n");
}
if (threshold && print_state->detailed) {
printf("%*s", 8, "[");
wordwrap(threshold, 8, pager_get_columns(), 0);
printf("]\n");
}
}
struct json_print_state {
/** Should a separator be printed prior to the next item? */
bool need_sep;
};
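/*
 * Illustrative shape of the JSON emitted below (keys depend on which fields
 * are present; the values here are made up):
 * [
 * {
 *	"Unit": "cpu",
 *	"EventName": "cycles",
 *	"EventType": "Kernel PMU event"
 * },
 * ...
 * ]
 */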
static void json_print_start(void *print_state __maybe_unused)
{
printf("[\n");
}
static void json_print_end(void *ps)
{
struct json_print_state *print_state = ps;
printf("%s]\n", print_state->need_sep ? "\n" : "");
}
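/*
 * Summary comment (added): minimal printf-style helper writing @fmt to
 * stdout via the scratch strbuf @buf. Only two conversions are supported:
 * %s copies its string argument verbatim, while %S JSON-escapes it (a
 * newline becomes "\n"; '"' and '\\' get a leading backslash). Any other
 * conversion character is reported and echoed literally.
 */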
static void fix_escape_printf(struct strbuf *buf, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
strbuf_setlen(buf, 0);
for (size_t fmt_pos = 0; fmt_pos < strlen(fmt); fmt_pos++) {
switch (fmt[fmt_pos]) {
case '%':
fmt_pos++;
switch (fmt[fmt_pos]) {
case 's': {
const char *s = va_arg(args, const char*);
strbuf_addstr(buf, s);
break;
}
case 'S': {
const char *s = va_arg(args, const char*);
for (size_t s_pos = 0; s_pos < strlen(s); s_pos++) {
switch (s[s_pos]) {
case '\n':
strbuf_addstr(buf, "\\n");
break;
case '\\':
fallthrough;
case '\"':
strbuf_addch(buf, '\\');
fallthrough;
default:
strbuf_addch(buf, s[s_pos]);
break;
}
}
break;
}
default:
pr_err("Unexpected format character '%c'\n", fmt[fmt_pos]);
strbuf_addch(buf, '%');
strbuf_addch(buf, fmt[fmt_pos]);
}
break;
default:
strbuf_addch(buf, fmt[fmt_pos]);
break;
}
}
va_end(args);
fputs(buf->buf, stdout);
}
static void json_print_event(void *ps, const char *pmu_name, const char *topic,
const char *event_name, const char *event_alias,
const char *scale_unit,
bool deprecated, const char *event_type_desc,
const char *desc, const char *long_desc,
const char *encoding_desc)
{
struct json_print_state *print_state = ps;
bool need_sep = false;
struct strbuf buf;
strbuf_init(&buf, 0);
printf("%s{\n", print_state->need_sep ? ",\n" : "");
print_state->need_sep = true;
if (pmu_name) {
fix_escape_printf(&buf, "\t\"Unit\": \"%S\"", pmu_name);
need_sep = true;
}
if (topic) {
fix_escape_printf(&buf, "%s\t\"Topic\": \"%S\"", need_sep ? ",\n" : "", topic);
need_sep = true;
}
if (event_name) {
fix_escape_printf(&buf, "%s\t\"EventName\": \"%S\"", need_sep ? ",\n" : "",
event_name);
need_sep = true;
}
if (event_alias && strlen(event_alias)) {
fix_escape_printf(&buf, "%s\t\"EventAlias\": \"%S\"", need_sep ? ",\n" : "",
event_alias);
need_sep = true;
}
if (scale_unit && strlen(scale_unit)) {
fix_escape_printf(&buf, "%s\t\"ScaleUnit\": \"%S\"", need_sep ? ",\n" : "",
scale_unit);
need_sep = true;
}
if (event_type_desc) {
fix_escape_printf(&buf, "%s\t\"EventType\": \"%S\"", need_sep ? ",\n" : "",
event_type_desc);
need_sep = true;
}
if (deprecated) {
fix_escape_printf(&buf, "%s\t\"Deprecated\": \"1\"", need_sep ? ",\n" : "");
need_sep = true;
}
if (desc) {
fix_escape_printf(&buf, "%s\t\"BriefDescription\": \"%S\"", need_sep ? ",\n" : "",
desc);
need_sep = true;
}
if (long_desc) {
fix_escape_printf(&buf, "%s\t\"PublicDescription\": \"%S\"", need_sep ? ",\n" : "",
long_desc);
need_sep = true;
}
if (encoding_desc) {
fix_escape_printf(&buf, "%s\t\"Encoding\": \"%S\"", need_sep ? ",\n" : "",
encoding_desc);
need_sep = true;
}
printf("%s}", need_sep ? "\n" : "");
strbuf_release(&buf);
}
static void json_print_metric(void *ps, const char *group,
const char *name, const char *desc,
const char *long_desc, const char *expr,
const char *threshold, const char *unit)
{
struct json_print_state *print_state = ps;
bool need_sep = false;
struct strbuf buf;
strbuf_init(&buf, 0);
printf("%s{\n", print_state->need_sep ? ",\n" : "");
print_state->need_sep = true;
if (group) {
fix_escape_printf(&buf, "\t\"MetricGroup\": \"%S\"", group);
need_sep = true;
}
if (name) {
fix_escape_printf(&buf, "%s\t\"MetricName\": \"%S\"", need_sep ? ",\n" : "", name);
need_sep = true;
}
if (expr) {
fix_escape_printf(&buf, "%s\t\"MetricExpr\": \"%S\"", need_sep ? ",\n" : "", expr);
need_sep = true;
}
if (threshold) {
fix_escape_printf(&buf, "%s\t\"MetricThreshold\": \"%S\"", need_sep ? ",\n" : "",
threshold);
need_sep = true;
}
if (unit) {
fix_escape_printf(&buf, "%s\t\"ScaleUnit\": \"%S\"", need_sep ? ",\n" : "", unit);
need_sep = true;
}
if (desc) {
fix_escape_printf(&buf, "%s\t\"BriefDescription\": \"%S\"", need_sep ? ",\n" : "",
desc);
need_sep = true;
}
if (long_desc) {
fix_escape_printf(&buf, "%s\t\"PublicDescription\": \"%S\"", need_sep ? ",\n" : "",
long_desc);
need_sep = true;
}
printf("%s}", need_sep ? "\n" : "");
strbuf_release(&buf);
}
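/*
 * For the default output, events that repeat across duplicate PMUs are
 * printed only once unless long descriptions were requested.
 */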
static bool default_skip_duplicate_pmus(void *ps)
{
struct print_state *print_state = ps;
return !print_state->long_desc;
}
int cmd_list(int argc, const char **argv)
{
int i, ret = 0;
struct print_state default_ps = {};
struct print_state json_ps = {};
void *ps = &default_ps;
struct print_callbacks print_cb = {
.print_start = default_print_start,
.print_end = default_print_end,
.print_event = default_print_event,
.print_metric = default_print_metric,
.skip_duplicate_pmus = default_skip_duplicate_pmus,
};
const char *cputype = NULL;
const char *unit_name = NULL;
bool json = false;
struct option list_options[] = {
OPT_BOOLEAN(0, "raw-dump", &default_ps.name_only, "Dump raw events"),
OPT_BOOLEAN('j', "json", &json, "JSON encode events and metrics"),
OPT_BOOLEAN('d', "desc", &default_ps.desc,
"Print extra event descriptions. --no-desc to not print."),
OPT_BOOLEAN('v', "long-desc", &default_ps.long_desc,
"Print longer event descriptions."),
OPT_BOOLEAN(0, "details", &default_ps.detailed,
"Print information on the perf event names and expressions used internally by events."),
OPT_BOOLEAN(0, "deprecated", &default_ps.deprecated,
"Print deprecated events."),
OPT_STRING(0, "cputype", &cputype, "cpu type",
"Limit PMU or metric printing to the given PMU (e.g. cpu, core or atom)."),
OPT_STRING(0, "unit", &unit_name, "PMU name",
"Limit PMU or metric printing to the specified PMU."),
OPT_INCR(0, "debug", &verbose,
"Enable debugging output"),
OPT_END()
};
const char * const list_usage[] = {
#ifdef HAVE_LIBPFM
"perf list [<options>] [hw|sw|cache|tracepoint|pmu|sdt|metric|metricgroup|event_glob|pfm]",
#else
"perf list [<options>] [hw|sw|cache|tracepoint|pmu|sdt|metric|metricgroup|event_glob]",
#endif
NULL
};
set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN);
/* Hide hybrid flag for the more generic 'unit' flag. */
set_option_flag(list_options, 0, "cputype", PARSE_OPT_HIDDEN);
argc = parse_options(argc, argv, list_options, list_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
setup_pager();
if (json) {
print_cb = (struct print_callbacks){
.print_start = json_print_start,
.print_end = json_print_end,
.print_event = json_print_event,
.print_metric = json_print_metric,
};
ps = &json_ps;
} else {
default_ps.desc = !default_ps.long_desc;
default_ps.last_topic = strdup("");
assert(default_ps.last_topic);
default_ps.visited_metrics = strlist__new(NULL, NULL);
assert(default_ps.visited_metrics);
if (unit_name)
default_ps.pmu_glob = strdup(unit_name);
else if (cputype) {
const struct perf_pmu *pmu = perf_pmus__pmu_for_pmu_filter(cputype);
if (!pmu) {
pr_err("ERROR: cputype is not supported!\n");
ret = -1;
goto out;
}
default_ps.pmu_glob = strdup(pmu->name);
}
}
print_cb.print_start(ps);
if (argc == 0) {
default_ps.metrics = true;
default_ps.metricgroups = true;
print_events(&print_cb, ps);
goto out;
}
for (i = 0; i < argc; ++i) {
char *sep, *s;
if (strcmp(argv[i], "tracepoint") == 0)
print_tracepoint_events(&print_cb, ps);
else if (strcmp(argv[i], "hw") == 0 ||
strcmp(argv[i], "hardware") == 0)
print_symbol_events(&print_cb, ps, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX);
else if (strcmp(argv[i], "sw") == 0 ||
strcmp(argv[i], "software") == 0) {
print_symbol_events(&print_cb, ps, PERF_TYPE_SOFTWARE,
event_symbols_sw, PERF_COUNT_SW_MAX);
print_tool_events(&print_cb, ps);
} else if (strcmp(argv[i], "cache") == 0 ||
strcmp(argv[i], "hwcache") == 0)
print_hwcache_events(&print_cb, ps);
else if (strcmp(argv[i], "pmu") == 0)
perf_pmus__print_pmu_events(&print_cb, ps);
else if (strcmp(argv[i], "sdt") == 0)
print_sdt_events(&print_cb, ps);
else if (strcmp(argv[i], "metric") == 0 || strcmp(argv[i], "metrics") == 0) {
default_ps.metricgroups = false;
default_ps.metrics = true;
metricgroup__print(&print_cb, ps);
} else if (strcmp(argv[i], "metricgroup") == 0 ||
strcmp(argv[i], "metricgroups") == 0) {
default_ps.metricgroups = true;
default_ps.metrics = false;
metricgroup__print(&print_cb, ps);
}
#ifdef HAVE_LIBPFM
else if (strcmp(argv[i], "pfm") == 0)
print_libpfm_events(&print_cb, ps);
#endif
else if ((sep = strchr(argv[i], ':')) != NULL) {
char *old_pmu_glob = default_ps.pmu_glob;
default_ps.event_glob = strdup(argv[i]);
if (!default_ps.event_glob) {
ret = -1;
goto out;
}
print_tracepoint_events(&print_cb, ps);
print_sdt_events(&print_cb, ps);
default_ps.metrics = true;
default_ps.metricgroups = true;
metricgroup__print(&print_cb, ps);
zfree(&default_ps.event_glob);
default_ps.pmu_glob = old_pmu_glob;
} else {
if (asprintf(&s, "*%s*", argv[i]) < 0) {
printf("Critical: Not enough memory! Trying to continue...\n");
continue;
}
default_ps.event_glob = s;
print_symbol_events(&print_cb, ps, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX);
print_symbol_events(&print_cb, ps, PERF_TYPE_SOFTWARE,
event_symbols_sw, PERF_COUNT_SW_MAX);
print_tool_events(&print_cb, ps);
print_hwcache_events(&print_cb, ps);
perf_pmus__print_pmu_events(&print_cb, ps);
print_tracepoint_events(&print_cb, ps);
print_sdt_events(&print_cb, ps);
default_ps.metrics = true;
default_ps.metricgroups = true;
metricgroup__print(&print_cb, ps);
free(s);
}
}
out:
print_cb.print_end(ps);
free(default_ps.pmu_glob);
free(default_ps.last_topic);
free(default_ps.last_metricgroups);
strlist__delete(default_ps.visited_metrics);
return ret;
}
| linux-master | tools/perf/builtin-list.c |
/*
* builtin-trace.c
*
* Builtin 'trace' command:
*
* Display a continuously updated trace of any workload, CPU, specific PID,
* system wide, etc. Default format is loosely strace like, but any other
* event may be specified using --event.
*
* Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*
* Initially based on the 'trace' prototype by Thomas Gleixner:
*
* http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
*/
#include "util/record.h"
#include <api/fs/tracing_path.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#ifdef HAVE_BPF_SKEL
#include "bpf_skel/augmented_raw_syscalls.skel.h"
#endif
#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/tracepoint.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include "../perf.h"
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>
#include <linux/ctype.h>
#include <perf/mmap.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#ifndef O_CLOEXEC
# define O_CLOEXEC 02000000
#endif
#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE 1024
#endif
#define RAW_SYSCALL_ARGS_NUM 6
/*
* strtoul: Go from a string to a value, e.g. for msr: MSR_FS_BASE to 0xc0000100
*/
struct syscall_arg_fmt {
size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
void *parm;
const char *name;
u16 nr_entries; // for arrays
bool show_zero;
};
struct syscall_fmt {
const char *name;
const char *alias;
struct {
const char *sys_enter,
*sys_exit;
} bpf_prog_name;
struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
u8 nr_args;
bool errpid;
bool timeout;
bool hexret;
};
struct trace {
struct perf_tool tool;
struct syscalltbl *sctbl;
struct {
struct syscall *table;
struct {
struct evsel *sys_enter,
*sys_exit,
*bpf_output;
} events;
} syscalls;
#ifdef HAVE_BPF_SKEL
struct augmented_raw_syscalls_bpf *skel;
#endif
struct record_opts opts;
struct evlist *evlist;
struct machine *host;
struct thread *current;
struct cgroup *cgroup;
u64 base_time;
FILE *output;
unsigned long nr_events;
unsigned long nr_events_printed;
unsigned long max_events;
struct evswitch evswitch;
struct strlist *ev_qualifier;
struct {
size_t nr;
int *entries;
} ev_qualifier_ids;
struct {
size_t nr;
pid_t *entries;
struct bpf_map *map;
} filter_pids;
double duration_filter;
double runtime_ms;
struct {
u64 vfs_getname,
proc_getname;
} stats;
unsigned int max_stack;
unsigned int min_stack;
int raw_augmented_syscalls_args_size;
bool raw_augmented_syscalls;
bool fd_path_disabled;
bool sort_events;
bool not_ev_qualifier;
bool live;
bool full_time;
bool sched;
bool multiple_threads;
bool summary;
bool summary_only;
bool errno_summary;
bool failure_only;
bool show_comm;
bool print_sample;
bool show_tool_stats;
bool trace_syscalls;
bool libtraceevent_print;
bool kernel_syscallchains;
s16 args_alignment;
bool show_tstamp;
bool show_duration;
bool show_zeros;
bool show_arg_names;
bool show_string_prefix;
bool force;
bool vfs_getname;
int trace_pgfaults;
char *perfconfig_events;
struct {
struct ordered_events data;
u64 last;
} oe;
};
struct tp_field {
int offset;
union {
u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
};
};
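/*
 * The TP_UINT_FIELD* macros below generate fixed-width accessors
 * (tp_field__u8 .. tp_field__u64, plus byte-swapped variants) that read an
 * unsigned integer from the raw sample data at the field's offset.
 */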
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
u##bits value; \
memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
return value; \
}
TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);
#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
u##bits value; \
memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
return bswap_##bits(value);\
}
TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
field->offset = offset;
switch (size) {
case 1:
field->integer = tp_field__u8;
break;
case 2:
field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
break;
case 4:
field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
break;
case 8:
field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
break;
default:
return -1;
}
return 0;
}
static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}
static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
return sample->raw_data + field->offset;
}
static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
field->offset = offset;
field->pointer = tp_field__ptr;
return 0;
}
static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
return __tp_field__init_ptr(field, format_field->offset);
}
struct syscall_tp {
struct tp_field id;
union {
struct tp_field args, ret;
};
};
/*
* The evsel->priv as used by 'perf trace'
* sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
* fmt: for all the other tracepoints
*/
struct evsel_trace {
struct syscall_tp sc;
struct syscall_arg_fmt *fmt;
};
static struct evsel_trace *evsel_trace__new(void)
{
return zalloc(sizeof(struct evsel_trace));
}
static void evsel_trace__delete(struct evsel_trace *et)
{
if (et == NULL)
return;
zfree(&et->fmt);
free(et);
}
/*
* Used with raw_syscalls:sys_{enter,exit} and with the
* syscalls:sys_{enter,exit}_SYSCALL tracepoints
*/
static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
{
struct evsel_trace *et = evsel->priv;
return &et->sc;
}
static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
{
if (evsel->priv == NULL) {
evsel->priv = evsel_trace__new();
if (evsel->priv == NULL)
return NULL;
}
return __evsel__syscall_tp(evsel);
}
/*
* Used with all the other tracepoints.
*/
static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
{
struct evsel_trace *et = evsel->priv;
return et->fmt;
}
static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
{
struct evsel_trace *et = evsel->priv;
if (evsel->priv == NULL) {
et = evsel->priv = evsel_trace__new();
if (et == NULL)
return NULL;
}
if (et->fmt == NULL) {
et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
if (et->fmt == NULL)
goto out_delete;
}
return __evsel__syscall_arg_fmt(evsel);
out_delete:
evsel_trace__delete(evsel->priv);
evsel->priv = NULL;
return NULL;
}
static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
struct tep_format_field *format_field = evsel__field(evsel, name);
if (format_field == NULL)
return -1;
return tp_field__init_uint(field, format_field, evsel->needs_swap);
}
#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
evsel__init_tp_uint_field(evsel, &sc->name, #name); })
static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
struct tep_format_field *format_field = evsel__field(evsel, name);
if (format_field == NULL)
return -1;
return tp_field__init_ptr(field, format_field);
}
#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
static void evsel__delete_priv(struct evsel *evsel)
{
zfree(&evsel->priv);
evsel__delete(evsel);
}
static int evsel__init_syscall_tp(struct evsel *evsel)
{
struct syscall_tp *sc = evsel__syscall_tp(evsel);
if (sc != NULL) {
if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
return -ENOENT;
return 0;
}
return -ENOMEM;
}
static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
struct syscall_tp *sc = evsel__syscall_tp(evsel);
if (sc != NULL) {
struct tep_format_field *syscall_id = evsel__field(tp, "id");
if (syscall_id == NULL)
syscall_id = evsel__field(tp, "__syscall_nr");
if (syscall_id == NULL ||
__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
return -EINVAL;
return 0;
}
return -ENOMEM;
}
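/*
 * In the augmented payload the syscall args (or the return value) start one
 * u64 past the syscall id field, hence the 'id.offset + sizeof(u64)' offsets
 * used below.
 */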
static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
struct syscall_tp *sc = __evsel__syscall_tp(evsel);
return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}
static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
struct syscall_tp *sc = __evsel__syscall_tp(evsel);
return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}
static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
if (evsel__syscall_tp(evsel) != NULL) {
if (perf_evsel__init_sc_tp_uint_field(evsel, id))
return -ENOENT;
evsel->handler = handler;
return 0;
}
return -ENOMEM;
}
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
struct evsel *evsel = evsel__newtp("raw_syscalls", direction);
/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
if (IS_ERR(evsel))
evsel = evsel__newtp("syscalls", direction);
if (IS_ERR(evsel))
return NULL;
if (evsel__init_raw_syscall_tp(evsel, handler))
goto out_delete;
return evsel;
out_delete:
evsel__delete_priv(evsel);
return NULL;
}
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
fields->name.integer(&fields->name, sample); })
#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
fields->name.pointer(&fields->name, sample); })
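/*
 * Map an integer value to its symbolic name in a string array, optionally
 * decorated with the array's suffix (or, in the variant below, prefix).
 * Out-of-range values fall back to the numeric format plus a "PREFIX???"
 * annotation.
 */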
size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
{
int idx = val - sa->offset;
if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
size_t printed = scnprintf(bf, size, intfmt, val);
if (show_suffix)
printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
return printed;
}
return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
}
size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
int idx = val - sa->offset;
if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
size_t printed = scnprintf(bf, size, intfmt, val);
if (show_prefix)
printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
return printed;
}
return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
const char *intfmt,
struct syscall_arg *arg)
{
return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}
static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
struct syscall_arg *arg)
{
return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}
#define SCA_STRARRAY syscall_arg__scnprintf_strarray
bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
return strarray__strtoul(arg->parm, bf, size, ret);
}
bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
return strarray__strtoul_flags(arg->parm, bf, size, ret);
}
bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
return strarrays__strtoul(arg->parm, bf, size, ret);
}
size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}
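/*
 * Like strarray__scnprintf(), but tries each string array in turn and uses
 * the first one whose range covers 'val'; otherwise it falls back to the
 * numeric format annotated with the first array's prefix.
 */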
size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
size_t printed;
int i;
for (i = 0; i < sas->nr_entries; ++i) {
struct strarray *sa = sas->entries[i];
int idx = val - sa->offset;
if (idx >= 0 && idx < sa->nr_entries) {
if (sa->entries[idx] == NULL)
break;
return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
}
printed = scnprintf(bf, size, intfmt, val);
if (show_prefix)
printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
return printed;
}
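/*
 * Reverse mapping: look up the name held in 'bf' (of length 'size') and
 * return its integer value, i.e. the array offset plus the entry index.
 */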
bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
int i;
for (i = 0; i < sa->nr_entries; ++i) {
if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
*ret = sa->offset + i;
return true;
}
}
return false;
}
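/*
 * Parse a '|'-separated list of flag names or numeric values back into a
 * bitmask, mapping each name via strarray__strtoul() and setting bit
 * (val - 1) for each token.
 */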
bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
u64 val = 0;
char *tok = bf, *sep, *end;
*ret = 0;
while (size != 0) {
int toklen = size;
sep = memchr(tok, '|', size);
if (sep != NULL) {
size -= sep - tok + 1;
end = sep - 1;
while (end > tok && isspace(*end))
--end;
toklen = end - tok + 1;
}
while (isspace(*tok))
++tok;
if (isalpha(*tok) || *tok == '_') {
if (!strarray__strtoul(sa, tok, toklen, &val))
return false;
} else
val = strtoul(tok, NULL, 0);
*ret |= (1ULL << (val - 1));
if (sep == NULL)
break;
tok = sep + 1;
}
return true;
}
bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
{
int i;
for (i = 0; i < sas->nr_entries; ++i) {
struct strarray *sa = sas->entries[i];
if (strarray__strtoul(sa, bf, size, ret))
return true;
}
return false;
}
size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
struct syscall_arg *arg)
{
return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}
#ifndef AT_FDCWD
#define AT_FDCWD -100
#endif
static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
struct syscall_arg *arg)
{
int fd = arg->val;
const char *prefix = "AT_FD";
if (fd == AT_FDCWD)
return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
return syscall_arg__scnprintf_fd(bf, size, arg);
}
#define SCA_FDAT syscall_arg__scnprintf_fd_at
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
struct syscall_arg *arg);
#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
return scnprintf(bf, size, "%#lx", arg->val);
}
size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
if (arg->val == 0)
return scnprintf(bf, size, "NULL");
return syscall_arg__scnprintf_hex(bf, size, arg);
}
size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
return scnprintf(bf, size, "%d", arg->val);
}
size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
return scnprintf(bf, size, "%ld", arg->val);
}
static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
{
// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
// fill missing comms using thread__set_comm()...
// here or in a special syscall_arg__scnprintf_pid_sched_tp...
return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
}
#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array
static const char *bpf_cmd[] = {
"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
"MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
"PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
"PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
"PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
"TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
"BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
"MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
"LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
"LINK_DETACH", "PROG_BIND_MAP",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");
static const char *fsmount_flags[] = {
[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
#include "trace/beauty/generated/fsconfig_arrays.c"
static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");
static const char *keyctl_options[] = {
"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");
static const char *fcntl_cmds[] = {
"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");
static const char *fcntl_linux_specific_cmds[] = {
"SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};
static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
static struct strarray *fcntl_cmds_arrays[] = {
&strarray__fcntl_cmds,
&strarray__fcntl_linux_specific_cmds,
};
static DEFINE_STRARRAYS(fcntl_cmds_arrays);
static const char *rlimit_resources[] = {
"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");
static const char *clockid[] = {
"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");
static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *suffix = "_OK";
size_t printed = 0;
int mode = arg->val;
if (mode == F_OK) /* 0 */
return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define P_MODE(n) \
if (mode & n##_OK) { \
printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
mode &= ~n##_OK; \
}
P_MODE(R);
P_MODE(W);
P_MODE(X);
#undef P_MODE
if (mode)
printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
return printed;
}
#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
struct syscall_arg *arg);
#define SCA_FILENAME syscall_arg__scnprintf_filename
static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "O_";
int printed = 0, flags = arg->val;
#define P_FLAG(n) \
if (flags & O_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~O_##n; \
}
P_FLAG(CLOEXEC);
P_FLAG(NONBLOCK);
#undef P_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
return printed;
}
#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK 0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM 0x0002
#endif
static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "GRND_";
int printed = 0, flags = arg->val;
#define P_FLAG(n) \
if (flags & GRND_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~GRND_##n; \
}
P_FLAG(RANDOM);
P_FLAG(NONBLOCK);
#undef P_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
return printed;
}
#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
#define STRARRAY(name, array) \
{ .scnprintf = SCA_STRARRAY, \
.strtoul = STUL_STRARRAY, \
.parm = &strarray__##array, }
#define STRARRAY_FLAGS(name, array) \
{ .scnprintf = SCA_STRARRAY_FLAGS, \
.strtoul = STUL_STRARRAY_FLAGS, \
.parm = &strarray__##array, }
#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"
static const struct syscall_fmt syscall_fmts[] = {
{ .name = "access",
.arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
{ .name = "arch_prctl",
.arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
[1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
{ .name = "bind",
.arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
[1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
[2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
{ .name = "bpf",
.arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
{ .name = "brk", .hexret = true,
.arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
{ .name = "clock_gettime",
.arg = { [0] = STRARRAY(clk_id, clockid), }, },
{ .name = "clock_nanosleep",
.arg = { [2] = { .scnprintf = SCA_TIMESPEC, /* rqtp */ }, }, },
{ .name = "clone", .errpid = true, .nr_args = 5,
.arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
[1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
[2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
[3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, },
[4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
{ .name = "close",
.arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
{ .name = "connect",
.arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
[1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
[2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
{ .name = "epoll_ctl",
.arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
{ .name = "eventfd2",
.arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
{ .name = "fchmodat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "fchownat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "fcntl",
.arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
.strtoul = STUL_STRARRAYS,
.parm = &strarrays__fcntl_cmds_arrays,
.show_zero = true, },
[2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
{ .name = "flock",
.arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
{ .name = "fsconfig",
.arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
{ .name = "fsmount",
.arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
[2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
{ .name = "fspick",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
[1] = { .scnprintf = SCA_FILENAME, /* path */ },
[2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
{ .name = "fstat", .alias = "newfstat", },
{ .name = "fstatat", .alias = "newfstatat", },
{ .name = "futex",
.arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
[5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
{ .name = "futimesat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "getitimer",
.arg = { [0] = STRARRAY(which, itimers), }, },
{ .name = "getpid", .errpid = true, },
{ .name = "getpgid", .errpid = true, },
{ .name = "getppid", .errpid = true, },
{ .name = "getrandom",
.arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
{ .name = "getrlimit",
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
{ .name = "getsockopt",
.arg = { [1] = STRARRAY(level, socket_level), }, },
{ .name = "gettid", .errpid = true, },
{ .name = "ioctl",
.arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
* FIXME: Make this available to all arches.
*/
[1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
[2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
[2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
{ .name = "kcmp", .nr_args = 5,
.arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
[1] = { .name = "pid2", .scnprintf = SCA_PID, },
[2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
[3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
[4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
{ .name = "keyctl",
.arg = { [0] = STRARRAY(option, keyctl_options), }, },
{ .name = "kill",
.arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "linkat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "lseek",
.arg = { [2] = STRARRAY(whence, whences), }, },
{ .name = "lstat", .alias = "newlstat", },
{ .name = "madvise",
.arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
[2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
{ .name = "mkdirat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "mknodat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "mmap", .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
.alias = "old_mmap",
#endif
.arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
[3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
.strtoul = STUL_STRARRAY_FLAGS,
.parm = &strarray__mmap_flags, },
[5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
{ .name = "mount",
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
[3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
.mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
{ .name = "move_mount",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
[1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
[2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
[3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
[4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
{ .name = "mprotect",
.arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
[2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
{ .name = "mq_unlink",
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
{ .name = "mremap", .hexret = true,
.arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
{ .name = "name_to_handle_at",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
{ .name = "newfstatat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
{ .name = "open",
.arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
{ .name = "open_by_handle_at",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
[2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
{ .name = "openat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
[2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
{ .name = "perf_event_open",
.arg = { [0] = { .scnprintf = SCA_PERF_ATTR, /* attr */ },
[2] = { .scnprintf = SCA_INT, /* cpu */ },
[3] = { .scnprintf = SCA_FD, /* group_fd */ },
[4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
{ .name = "pipe2",
.arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
{ .name = "pkey_alloc",
.arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
{ .name = "pkey_free",
.arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
{ .name = "pkey_mprotect",
.arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
[2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
[3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
{ .name = "poll", .timeout = true, },
{ .name = "ppoll", .timeout = true, },
{ .name = "prctl",
.arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
.strtoul = STUL_STRARRAY,
.parm = &strarray__prctl_options, },
[1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
[2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
{ .name = "pread", .alias = "pread64", },
{ .name = "preadv", .alias = "pread", },
{ .name = "prlimit64",
.arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
{ .name = "pwrite", .alias = "pwrite64", },
{ .name = "readlinkat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
{ .name = "recvfrom",
.arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
{ .name = "recvmmsg",
.arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
{ .name = "recvmsg",
.arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
{ .name = "renameat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
[2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
{ .name = "renameat2",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
[2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
[4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
{ .name = "rt_sigaction",
.arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "rt_sigprocmask",
.arg = { [0] = STRARRAY(how, sighow), }, },
{ .name = "rt_sigqueueinfo",
.arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "rt_tgsigqueueinfo",
.arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "sched_setscheduler",
.arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
{ .name = "seccomp",
.arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
[1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
{ .name = "select", .timeout = true, },
{ .name = "sendfile", .alias = "sendfile64", },
{ .name = "sendmmsg",
.arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
{ .name = "sendmsg",
.arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
{ .name = "sendto",
.arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
[4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
{ .name = "set_tid_address", .errpid = true, },
{ .name = "setitimer",
.arg = { [0] = STRARRAY(which, itimers), }, },
{ .name = "setrlimit",
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
{ .name = "setsockopt",
.arg = { [1] = STRARRAY(level, socket_level), }, },
{ .name = "socket",
.arg = { [0] = STRARRAY(family, socket_families),
[1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
[2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
{ .name = "socketpair",
.arg = { [0] = STRARRAY(family, socket_families),
[1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
[2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
{ .name = "stat", .alias = "newstat", },
{ .name = "statx",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
[2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
[3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
{ .name = "swapoff",
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
{ .name = "swapon",
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
{ .name = "symlinkat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
{ .name = "sync_file_range",
.arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
{ .name = "tgkill",
.arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "tkill",
.arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "umount2", .alias = "umount",
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
{ .name = "uname", .alias = "newuname", },
{ .name = "unlinkat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
{ .name = "utimensat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
{ .name = "wait4", .errpid = true,
.arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
{ .name = "waitid", .errpid = true,
.arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};
static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
const struct syscall_fmt *fmt = fmtp;
return strcmp(name, fmt->name);
}
static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts,
const int nmemb,
const char *name)
{
return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}
static const struct syscall_fmt *syscall_fmt__find(const char *name)
{
const int nmemb = ARRAY_SIZE(syscall_fmts);
return __syscall_fmt__find(syscall_fmts, nmemb, name);
}
static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts,
const int nmemb, const char *alias)
{
int i;
for (i = 0; i < nmemb; ++i) {
if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
return &fmts[i];
}
return NULL;
}
static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
const int nmemb = ARRAY_SIZE(syscall_fmts);
return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
}
/*
* is_exit: is this "exit" or "exit_group"?
* is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
* args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
* nonexistent: Just a hole in the syscall table, syscall id not allocated
*/
struct syscall {
struct tep_event *tp_format;
int nr_args;
int args_size;
struct {
struct bpf_program *sys_enter,
*sys_exit;
} bpf_prog;
bool is_exit;
bool is_open;
bool nonexistent;
struct tep_format_field *args;
const char *name;
const struct syscall_fmt *fmt;
struct syscall_arg_fmt *arg_fmt;
};
/*
* We need this 'calculated' boolean because in some cases we really don't
* know the duration of a syscall, for instance, when we start a session and
* some threads are waiting for a syscall to finish, say 'poll', in which
* case all we can do is print "( ? )" for the duration and the start
* timestamp.
*/
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
double duration = (double)t / NSEC_PER_MSEC;
size_t printed = fprintf(fp, "(");
if (!calculated)
printed += fprintf(fp, " ");
else if (duration >= 1.0)
printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
else if (duration >= 0.01)
printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
else
printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
return printed + fprintf(fp, "): ");
}
/**
* filename.ptr: The filename char pointer that will be vfs_getname'd
* filename.entry_str_pos: Where to insert the string translated from
* filename.ptr by the vfs_getname tracepoint/kprobe.
* ret_scnprintf: syscall args may set this to a different syscall return
* formatter, for instance, fcntl may return fds, file flags, etc.
*/
struct thread_trace {
u64 entry_time;
bool entry_pending;
unsigned long nr_events;
unsigned long pfmaj, pfmin;
char *entry_str;
double runtime_ms;
size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
struct {
unsigned long ptr;
short int entry_str_pos;
bool pending_open;
unsigned int namelen;
char *name;
} filename;
struct {
int max;
struct file *table;
} files;
struct intlist *syscall_stats;
};
static struct thread_trace *thread_trace__new(void)
{
struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
if (ttrace) {
ttrace->files.max = -1;
ttrace->syscall_stats = intlist__new(NULL);
}
return ttrace;
}
static void thread_trace__free_files(struct thread_trace *ttrace);
static void thread_trace__delete(void *pttrace)
{
struct thread_trace *ttrace = pttrace;
if (!ttrace)
return;
intlist__delete(ttrace->syscall_stats);
ttrace->syscall_stats = NULL;
thread_trace__free_files(ttrace);
zfree(&ttrace->entry_str);
free(ttrace);
}
static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
struct thread_trace *ttrace;
if (thread == NULL)
goto fail;
if (thread__priv(thread) == NULL)
thread__set_priv(thread, thread_trace__new());
if (thread__priv(thread) == NULL)
goto fail;
ttrace = thread__priv(thread);
++ttrace->nr_events;
return ttrace;
fail:
color_fprintf(fp, PERF_COLOR_RED,
"WARNING: not enough memory, dropping samples!\n");
return NULL;
}
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
struct thread_trace *ttrace = thread__priv(arg->thread);
ttrace->ret_scnprintf = ret_scnprintf;
}
#define TRACE_PFMAJ (1 << 0)
#define TRACE_PFMIN (1 << 1)
static const size_t trace__entry_str_size = 2048;
static void thread_trace__free_files(struct thread_trace *ttrace)
{
for (int i = 0; i <= ttrace->files.max; ++i) {
struct file *file = ttrace->files.table + i;
zfree(&file->pathname);
}
zfree(&ttrace->files.table);
ttrace->files.max = -1;
}
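/*
 * Return the file table entry for 'fd', growing the fd-indexed table with
 * zeroed entries as needed; returns NULL for negative fds or on allocation
 * failure.
 */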
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
if (fd < 0)
return NULL;
if (fd > ttrace->files.max) {
struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
if (nfiles == NULL)
return NULL;
if (ttrace->files.max != -1) {
memset(nfiles + ttrace->files.max + 1, 0,
(fd - ttrace->files.max) * sizeof(struct file));
} else {
memset(nfiles, 0, (fd + 1) * sizeof(struct file));
}
ttrace->files.table = nfiles;
ttrace->files.max = fd;
}
return ttrace->files.table + fd;
}
struct file *thread__files_entry(struct thread *thread, int fd)
{
return thread_trace__files_entry(thread__priv(thread), fd);
}
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
struct thread_trace *ttrace = thread__priv(thread);
struct file *file = thread_trace__files_entry(ttrace, fd);
if (file != NULL) {
struct stat st;
if (stat(pathname, &st) == 0)
file->dev_maj = major(st.st_rdev);
file->pathname = strdup(pathname);
if (file->pathname)
return 0;
}
return -1;
}
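/*
 * Resolve 'fd' to a pathname by readlink()ing /proc/<pid>/fd/<fd> (or the
 * /proc/<pid>/task/<tid>/fd/<fd> variant for non-leader threads) and cache
 * the result in the thread's file table.
 */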
static int thread__read_fd_path(struct thread *thread, int fd)
{
char linkname[PATH_MAX], pathname[PATH_MAX];
struct stat st;
int ret;
if (thread__pid(thread) == thread__tid(thread)) {
scnprintf(linkname, sizeof(linkname),
"/proc/%d/fd/%d", thread__pid(thread), fd);
} else {
scnprintf(linkname, sizeof(linkname),
"/proc/%d/task/%d/fd/%d",
thread__pid(thread), thread__tid(thread), fd);
}
if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
return -1;
ret = readlink(linkname, pathname, sizeof(pathname));
if (ret < 0 || ret > st.st_size)
return -1;
pathname[ret] = '\0';
return trace__set_fd_pathname(thread, fd, pathname);
}
static const char *thread__fd_path(struct thread *thread, int fd,
struct trace *trace)
{
struct thread_trace *ttrace = thread__priv(thread);
if (ttrace == NULL || trace->fd_path_disabled)
return NULL;
if (fd < 0)
return NULL;
if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
if (!trace->live)
return NULL;
++trace->stats.proc_getname;
if (thread__read_fd_path(thread, fd))
return NULL;
}
return ttrace->files.table[fd].pathname;
}
size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
int fd = arg->val;
size_t printed = scnprintf(bf, size, "%d", fd);
const char *path = thread__fd_path(arg->thread, fd, arg->trace);
if (path)
printed += scnprintf(bf + printed, size - printed, "<%s>", path);
return printed;
}
size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
size_t printed = scnprintf(bf, size, "%d", fd);
struct thread *thread = machine__find_thread(trace->host, pid, pid);
if (thread) {
const char *path = thread__fd_path(thread, fd, trace);
if (path)
printed += scnprintf(bf + printed, size - printed, "<%s>", path);
thread__put(thread);
}
return printed;
}
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
struct syscall_arg *arg)
{
int fd = arg->val;
size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
struct thread_trace *ttrace = thread__priv(arg->thread);
if (ttrace && fd >= 0 && fd <= ttrace->files.max)
zfree(&ttrace->files.table[fd].pathname);
return printed;
}
static void thread__set_filename_pos(struct thread *thread, const char *bf,
unsigned long ptr)
{
struct thread_trace *ttrace = thread__priv(thread);
ttrace->filename.ptr = ptr;
ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}
static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
struct augmented_arg *augmented_arg = arg->augmented.args;
size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
/*
* So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
* we would have two strings, each prefixed by its size.
*/
int consumed = sizeof(*augmented_arg) + augmented_arg->size;
arg->augmented.args = ((void *)arg->augmented.args) + consumed;
arg->augmented.size -= consumed;
return printed;
}
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
struct syscall_arg *arg)
{
unsigned long ptr = arg->val;
if (arg->augmented.args)
return syscall_arg__scnprintf_augmented_string(arg, bf, size);
if (!arg->trace->vfs_getname)
return scnprintf(bf, size, "%#x", ptr);
thread__set_filename_pos(arg->thread, bf, ptr);
return 0;
}
static bool trace__filter_duration(struct trace *trace, double t)
{
return t < (trace->duration_filter * NSEC_PER_MSEC);
}
static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
return fprintf(fp, "%10.3f ", ts);
}
/*
* We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
* using ttrace->entry_time for a thread that receives a sys_exit without
* first having received a sys_enter ("poll" issued before tracing session
* starts, lost sys_enter exit due to ring buffer overflow).
*/
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
if (tstamp > 0)
return __trace__fprintf_tstamp(trace, tstamp, fp);
return fprintf(fp, " ? ");
}
static pid_t workload_pid = -1;
static volatile sig_atomic_t done = false;
static volatile sig_atomic_t interrupted = false;
static void sighandler_interrupt(int sig __maybe_unused)
{
done = interrupted = true;
}
static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
void *context __maybe_unused)
{
if (info->si_pid == workload_pid)
done = true;
}
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
size_t printed = 0;
if (trace->multiple_threads) {
if (trace->show_comm)
printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
printed += fprintf(fp, "%d ", thread__tid(thread));
}
return printed;
}
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
size_t printed = 0;
if (trace->show_tstamp)
printed = trace__fprintf_tstamp(trace, tstamp, fp);
if (trace->show_duration)
printed += fprintf_duration(duration, duration_calculated, fp);
return printed + trace__fprintf_comm_tid(trace, thread, fp);
}
static int trace__process_event(struct trace *trace, struct machine *machine,
union perf_event *event, struct perf_sample *sample)
{
int ret = 0;
switch (event->header.type) {
case PERF_RECORD_LOST:
color_fprintf(trace->output, PERF_COLOR_RED,
"LOST %" PRIu64 " events!\n", event->lost.lost);
ret = machine__process_lost_event(machine, event, sample);
break;
default:
ret = machine__process_event(machine, event, sample);
break;
}
return ret;
}
static int trace__tool_process(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct trace *trace = container_of(tool, struct trace, tool);
return trace__process_event(trace, machine, event, sample);
}
static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
struct machine *machine = vmachine;
if (machine->kptr_restrict_warned)
return NULL;
if (symbol_conf.kptr_restrict) {
pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Kernel samples will not be resolved.\n");
machine->kptr_restrict_warned = true;
return NULL;
}
return machine__resolve_kernel_addr(vmachine, addrp, modp);
}
static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
int err = symbol__init(NULL);
if (err)
return err;
trace->host = machine__new_host();
if (trace->host == NULL)
return -ENOMEM;
thread__set_priv_destructor(thread_trace__delete);
err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
if (err < 0)
goto out;
err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
evlist->core.threads, trace__tool_process,
true, false, 1);
out:
if (err)
symbol__exit();
return err;
}
static void trace__symbols__exit(struct trace *trace)
{
machine__exit(trace->host);
trace->host = NULL;
symbol__exit();
}
static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
int idx;
if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
nr_args = sc->fmt->nr_args;
sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
if (sc->arg_fmt == NULL)
return -1;
for (idx = 0; idx < nr_args; ++idx) {
if (sc->fmt)
sc->arg_fmt[idx] = sc->fmt->arg[idx];
}
sc->nr_args = nr_args;
return 0;
}
static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
{ .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, },
{ .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
};
static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
{
const struct syscall_arg_fmt *fmt = fmtp;
return strcmp(name, fmt->name);
}
static const struct syscall_arg_fmt *
__syscall_arg_fmt__find_by_name(const struct syscall_arg_fmt *fmts, const int nmemb,
const char *name)
{
return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
}
static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
{
const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
}
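/*
 * Walk the tracepoint fields and, for each argument without a hand-written
 * formatter, pick a default pretty-printer from the field's type and name:
 * filenames, pointers, pids, modes, char arrays and file descriptors.
 */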
static struct tep_format_field *
syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
{
struct tep_format_field *last_field = NULL;
int len;
for (; field; field = field->next, ++arg) {
last_field = field;
if (arg->scnprintf)
continue;
len = strlen(field->name);
if (strcmp(field->type, "const char *") == 0 &&
((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
strstr(field->name, "path") != NULL))
arg->scnprintf = SCA_FILENAME;
else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
arg->scnprintf = SCA_PTR;
else if (strcmp(field->type, "pid_t") == 0)
arg->scnprintf = SCA_PID;
else if (strcmp(field->type, "umode_t") == 0)
arg->scnprintf = SCA_MODE_T;
else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
arg->scnprintf = SCA_CHAR_ARRAY;
arg->nr_entries = field->arraylen;
} else if ((strcmp(field->type, "int") == 0 ||
strcmp(field->type, "unsigned int") == 0 ||
strcmp(field->type, "long") == 0) &&
len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
/*
* /sys/kernel/tracing/events/syscalls/sys_enter*
* grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
* 65 int
* 23 unsigned int
* 7 unsigned long
*/
arg->scnprintf = SCA_FD;
} else {
const struct syscall_arg_fmt *fmt =
syscall_arg_fmt__find_by_name(field->name);
if (fmt) {
arg->scnprintf = fmt->scnprintf;
arg->strtoul = fmt->strtoul;
}
}
}
return last_field;
}
static int syscall__set_arg_fmts(struct syscall *sc)
{
struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
if (last_field)
sc->args_size = last_field->offset + last_field->size;
return 0;
}
static int trace__read_syscall_info(struct trace *trace, int id)
{
char tp_name[128];
struct syscall *sc;
const char *name = syscalltbl__name(trace->sctbl, id);
#ifdef HAVE_SYSCALL_TABLE_SUPPORT
if (trace->syscalls.table == NULL) {
trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
if (trace->syscalls.table == NULL)
return -ENOMEM;
}
#else
if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
// When using libaudit we don't know beforehand what the max syscall id is
struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
if (table == NULL)
return -ENOMEM;
// If the table is brand new, zero it entirely; otherwise zero only the newly grown tail
if (trace->syscalls.table == NULL)
memset(table, 0, (id + 1) * sizeof(*sc));
else
memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
trace->syscalls.table = table;
trace->sctbl->syscalls.max_id = id;
}
#endif
sc = trace->syscalls.table + id;
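/* A previous attempt may already have marked this syscall id as nonexistent. */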
if (sc->nonexistent)
return -EEXIST;
if (name == NULL) {
sc->nonexistent = true;
return -EEXIST;
}
sc->name = name;
sc->fmt = syscall_fmt__find(sc->name);
snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
sc->tp_format = trace_event__tp_format("syscalls", tp_name);
if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
sc->tp_format = trace_event__tp_format("syscalls", tp_name);
}
/*
* If we failed to read the tracepoint format via its sysfs node, the
* tracepoint doesn't exist. Set the 'nonexistent' flag to true.
*/
if (IS_ERR(sc->tp_format)) {
sc->nonexistent = true;
return PTR_ERR(sc->tp_format);
}
if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
return -ENOMEM;
sc->args = sc->tp_format->format.fields;
/*
* The first field, '__syscall_nr' or 'nr' ('nr' on older kernels),
* holds the syscall number, which we don't need here, so check for
* it and drop it.
*/
if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
sc->args = sc->args->next;
--sc->nr_args;
}
sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
return syscall__set_arg_fmts(sc);
}
static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
{
struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
if (fmt != NULL) {
syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields);
return 0;
}
return -ENOMEM;
}
static int intcmp(const void *a, const void *b)
{
const int *one = a, *another = b;
return *one - *another;
}
static int trace__validate_ev_qualifier(struct trace *trace)
{
int err = 0;
bool printed_invalid_prefix = false;
struct str_node *pos;
size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
trace->ev_qualifier_ids.entries = malloc(nr_allocated *
sizeof(trace->ev_qualifier_ids.entries[0]));
if (trace->ev_qualifier_ids.entries == NULL) {
fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
trace->output);
err = -EINVAL;
goto out;
}
strlist__for_each_entry(pos, trace->ev_qualifier) {
const char *sc = pos->s;
int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
if (id < 0) {
id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
if (id >= 0)
goto matches;
if (!printed_invalid_prefix) {
pr_debug("Skipping unknown syscalls: ");
printed_invalid_prefix = true;
} else {
pr_debug(", ");
}
pr_debug("%s", sc);
continue;
}
matches:
trace->ev_qualifier_ids.entries[nr_used++] = id;
if (match_next == -1)
continue;
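/* The qualifier was a glob that matched more than one syscall: collect the remaining matches, growing the array as needed. */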
while (1) {
id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
if (id < 0)
break;
if (nr_allocated == nr_used) {
void *entries;
nr_allocated += 8;
entries = realloc(trace->ev_qualifier_ids.entries,
nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
if (entries == NULL) {
err = -ENOMEM;
fputs("\nError:\t Not enough memory for parsing\n", trace->output);
goto out_free;
}
trace->ev_qualifier_ids.entries = entries;
}
trace->ev_qualifier_ids.entries[nr_used++] = id;
}
}
trace->ev_qualifier_ids.nr = nr_used;
qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
if (printed_invalid_prefix)
pr_debug("\n");
return err;
out_free:
zfree(&trace->ev_qualifier_ids.entries);
trace->ev_qualifier_ids.nr = 0;
goto out;
}
static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
{
bool in_ev_qualifier;
if (trace->ev_qualifier_ids.nr == 0)
return true;
in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
if (in_ev_qualifier)
return !trace->not_ev_qualifier;
return trace->not_ev_qualifier;
}
/*
* args is to be interpreted as a series of longs but we need to handle
* 8-byte unaligned accesses. args points to raw_data within the event
* and raw_data is guaranteed to be 8-byte unaligned because it is
* preceded by raw_size which is a u32. So we need to copy args to a temp
* variable to read it. Most notably this avoids extended load instructions
* on unaligned addresses.
*/
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
{
unsigned long val;
unsigned char *p = arg->args + sizeof(unsigned long) * idx;
memcpy(&val, p, sizeof(val));
return val;
}
static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
struct syscall_arg *arg)
{
if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
return scnprintf(bf, size, "arg%d: ", arg->idx);
}
/*
* Check if the value is in fact zero, i.e. mask whatever needs masking, such
* as mount 'flags' argument that needs ignoring some magic flag, see comment
* in tools/perf/trace/beauty/mount_flags.c
*/
static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
{
if (fmt && fmt->mask_val)
return fmt->mask_val(arg, val);
return val;
}
static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
struct syscall_arg *arg, unsigned long val)
{
if (fmt && fmt->scnprintf) {
arg->val = val;
if (fmt->parm)
arg->parm = fmt->parm;
return fmt->scnprintf(bf, size, arg);
}
return scnprintf(bf, size, "%ld", val);
}
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
unsigned char *args, void *augmented_args, int augmented_args_size,
struct trace *trace, struct thread *thread)
{
size_t printed = 0;
unsigned long val;
u8 bit = 1;
struct syscall_arg arg = {
.args = args,
.augmented = {
.size = augmented_args_size,
.args = augmented_args,
},
.idx = 0,
.mask = 0,
.trace = trace,
.thread = thread,
.show_string_prefix = trace->show_string_prefix,
};
struct thread_trace *ttrace = thread__priv(thread);
/*
* Things like fcntl will set this in its 'cmd' formatter to pick the
* right formatter for the return value (an fd? file flags?), which is
* not needed for syscalls that always return a given type, say an fd.
*/
ttrace->ret_scnprintf = NULL;
if (sc->args != NULL) {
struct tep_format_field *field;
for (field = sc->args; field;
field = field->next, ++arg.idx, bit <<= 1) {
if (arg.mask & bit)
continue;
arg.fmt = &sc->arg_fmt[arg.idx];
val = syscall_arg__val(&arg, arg.idx);
/*
* Some syscall args need some mask, most don't and
* return val untouched.
*/
val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);
/*
* Suppress this argument if its value is zero and we don't
* have a string associated with it in a strarray.
*/
if (val == 0 &&
!trace->show_zeros &&
!(sc->arg_fmt &&
(sc->arg_fmt[arg.idx].show_zero ||
sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
sc->arg_fmt[arg.idx].parm))
continue;
printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
if (trace->show_arg_names)
printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
bf + printed, size - printed, &arg, val);
}
} else if (IS_ERR(sc->tp_format)) {
/*
* If we managed to read the tracepoint /format file, then we
* may end up not having any args, like with gettid(), so only
* print the raw args when we didn't manage to read it.
*/
while (arg.idx < sc->nr_args) {
if (arg.mask & bit)
goto next_arg;
val = syscall_arg__val(&arg, arg.idx);
if (printed)
printed += scnprintf(bf + printed, size - printed, ", ");
printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
next_arg:
++arg.idx;
bit <<= 1;
}
}
return printed;
}
typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
union perf_event *event,
struct perf_sample *sample);
static struct syscall *trace__syscall_info(struct trace *trace,
struct evsel *evsel, int id)
{
int err = 0;
if (id < 0) {
/*
* XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
* before that, leaving at a higher verbosity level till that is
* explained. Reproduced with plain ftrace with:
*
* echo 1 > /t/events/raw_syscalls/sys_exit/enable
* grep "NR -1 " /t/trace_pipe
*
* After generating some load on the machine.
*/
if (verbose > 1) {
static u64 n;
fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
id, evsel__name(evsel), ++n);
}
return NULL;
}
err = -EINVAL;
#ifdef HAVE_SYSCALL_TABLE_SUPPORT
if (id > trace->sctbl->syscalls.max_id) {
#else
if (id >= trace->sctbl->syscalls.max_id) {
/*
* With libaudit we don't know beforehand what is the max_id,
* so we let trace__read_syscall_info() figure that out as we
* go on reading syscalls.
*/
err = trace__read_syscall_info(trace, id);
if (err)
#endif
goto out_cant_read;
}
if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
(err = trace__read_syscall_info(trace, id)) != 0)
goto out_cant_read;
if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
goto out_cant_read;
return &trace->syscalls.table[id];
out_cant_read:
if (verbose > 0) {
char sbuf[STRERR_BUFSIZE];
fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
fputs(" information\n", trace->output);
}
return NULL;
}
struct syscall_stats {
struct stats stats;
u64 nr_failures;
int max_errno;
u32 *errnos;
};
static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
int id, struct perf_sample *sample, long err, bool errno_summary)
{
struct int_node *inode;
struct syscall_stats *stats;
u64 duration = 0;
inode = intlist__findnew(ttrace->syscall_stats, id);
if (inode == NULL)
return;
stats = inode->priv;
if (stats == NULL) {
stats = zalloc(sizeof(*stats));
if (stats == NULL)
return;
init_stats(&stats->stats);
inode->priv = stats;
}
if (ttrace->entry_time && sample->time > ttrace->entry_time)
duration = sample->time - ttrace->entry_time;
update_stats(&stats->stats, duration);
if (err < 0) {
++stats->nr_failures;
if (!errno_summary)
return;
err = -err;
if (err > stats->max_errno) {
u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));
if (new_errnos) {
memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
} else {
pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
thread__comm_str(thread), thread__pid(thread),
thread__tid(thread));
return;
}
stats->errnos = new_errnos;
stats->max_errno = err;
}
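/* stats->errnos[] is indexed by errno value minus one. */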
++stats->errnos[err - 1];
}
}
static int trace__printf_interrupted_entry(struct trace *trace)
{
struct thread_trace *ttrace;
size_t printed;
int len;
if (trace->failure_only || trace->current == NULL)
return 0;
ttrace = thread__priv(trace->current);
if (!ttrace->entry_pending)
return 0;
printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
if (len < trace->args_alignment - 4)
printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
printed += fprintf(trace->output, " ...\n");
ttrace->entry_pending = false;
++trace->nr_events_printed;
return printed;
}
static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
struct perf_sample *sample, struct thread *thread)
{
int printed = 0;
if (trace->print_sample) {
double ts = (double)sample->time / NSEC_PER_MSEC;
printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
evsel__name(evsel), ts,
thread__comm_str(thread),
sample->pid, sample->tid, sample->cpu);
}
return printed;
}
static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
void *augmented_args = NULL;
/*
* For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
* and there we get all 6 syscall args plus the tracepoint common fields
* that gets calculated at the start and the syscall_nr (another long).
* So we check if that is the case and if so don't look after the
* sc->args_size but always after the full raw_syscalls:sys_enter payload,
* which is fixed.
*
* We'll revisit this later to pass s->args_size to the BPF augmenter
* (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
* copies only what we need for each syscall, like what happens when we
* use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
* traffic to just what is needed for each syscall.
*/
int args_size = raw_augmented_args_size ?: sc->args_size;
*augmented_args_size = sample->raw_size - args_size;
if (*augmented_args_size > 0)
augmented_args = sample->raw_data + args_size;
return augmented_args;
}
static void syscall__exit(struct syscall *sc)
{
if (!sc)
return;
zfree(&sc->arg_fmt);
}
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
char *msg;
void *args;
int printed = 0;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
int augmented_args_size = 0;
void *augmented_args = NULL;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
if (sc == NULL)
return -1;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
goto out_put;
trace__fprintf_sample(trace, evsel, sample, thread);
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
if (ttrace->entry_str == NULL) {
ttrace->entry_str = malloc(trace__entry_str_size);
if (!ttrace->entry_str)
goto out_put;
}
if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
trace__printf_interrupted_entry(trace);
/*
* If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
* arguments, even if the syscall being handled, say "openat", uses only 4.
* That breaks syscall__augmented_args()'s check for augmented args, as we
* calculate syscall->args_size using each syscalls:sys_enter_NAME tracefs
* format file, so when handling, say, the openat syscall we get 6 args for
* the raw_syscalls:sys_enter event when we expected just 4, and mistakenly
* think that the extra 2 u64 args are the augmented filename. So just check
* here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
*/
if (evsel != trace->syscalls.events.sys_enter)
augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
args, augmented_args, augmented_args_size, trace, thread);
if (sc->is_exit) {
if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
int alignment = 0;
trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
printed = fprintf(trace->output, "%s)", ttrace->entry_str);
if (trace->args_alignment > printed)
alignment = trace->args_alignment - printed;
fprintf(trace->output, "%*s= ?\n", alignment, " ");
}
} else {
ttrace->entry_pending = true;
/* See trace__vfs_getname & trace__sys_exit */
ttrace->filename.pending_open = false;
}
if (trace->current != thread) {
thread__put(trace->current);
trace->current = thread__get(thread);
}
err = 0;
out_put:
thread__put(thread);
return err;
}
static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
struct perf_sample *sample)
{
struct thread_trace *ttrace;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
char msg[1024];
void *args, *augmented_args = NULL;
int augmented_args_size;
if (sc == NULL)
return -1;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
/*
* We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
* and the rest of the beautifiers access it via struct syscall_arg.
*/
if (ttrace == NULL)
goto out_put;
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
fprintf(trace->output, "%s", msg);
err = 0;
out_put:
thread__put(thread);
return err;
}
static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
struct perf_sample *sample,
struct callchain_cursor *cursor)
{
struct addr_location al;
int max_stack = evsel->core.attr.sample_max_stack ?
evsel->core.attr.sample_max_stack :
trace->max_stack;
int err = -1;
addr_location__init(&al);
if (machine__resolve(trace->host, &al, sample) < 0)
goto out;
err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
out:
addr_location__exit(&al);
return err;
}
static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
{
/* TODO: user-configurable print_opts */
const unsigned int print_opts = EVSEL__PRINT_SYM |
EVSEL__PRINT_DSO |
EVSEL__PRINT_UNKNOWN_AS_ADDR;
return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output);
}
static const char *errno_to_name(struct evsel *evsel, int err)
{
struct perf_env *env = evsel__env(evsel);
const char *arch_name = perf_env__arch(env);
return arch_syscalls__strerrno(arch_name, err);
}
static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
long ret;
u64 duration = 0;
bool duration_calculated = false;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
int alignment = trace->args_alignment;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
if (sc == NULL)
return -1;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
goto out_put;
trace__fprintf_sample(trace, evsel, sample, thread);
ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
if (trace->summary)
thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
trace__set_fd_pathname(thread, ret, ttrace->filename.name);
ttrace->filename.pending_open = false;
++trace->stats.vfs_getname;
}
if (ttrace->entry_time) {
duration = sample->time - ttrace->entry_time;
if (trace__filter_duration(trace, duration))
goto out;
duration_calculated = true;
} else if (trace->duration_filter)
goto out;
if (sample->callchain) {
struct callchain_cursor *cursor = get_tls_callchain_cursor();
callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
if (callchain_ret == 0) {
if (cursor->nr < trace->min_stack)
goto out;
callchain_ret = 1;
}
}
if (trace->summary_only || (ret >= 0 && trace->failure_only))
goto out;
trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
if (ttrace->entry_pending) {
printed = fprintf(trace->output, "%s", ttrace->entry_str);
} else {
printed += fprintf(trace->output, " ... [");
color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
printed += 9;
printed += fprintf(trace->output, "]: %s()", sc->name);
}
printed++; /* the closing ')' */
if (alignment > printed)
alignment -= printed;
else
alignment = 0;
fprintf(trace->output, ")%*s= ", alignment, " ");
if (sc->fmt == NULL) {
if (ret < 0)
goto errno_print;
signed_print:
fprintf(trace->output, "%ld", ret);
} else if (ret < 0) {
errno_print: {
char bf[STRERR_BUFSIZE];
const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
*e = errno_to_name(evsel, -ret);
fprintf(trace->output, "-1 %s (%s)", e, emsg);
}
} else if (ret == 0 && sc->fmt->timeout)
fprintf(trace->output, "0 (Timeout)");
else if (ttrace->ret_scnprintf) {
char bf[1024];
struct syscall_arg arg = {
.val = ret,
.thread = thread,
.trace = trace,
};
ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
ttrace->ret_scnprintf = NULL;
fprintf(trace->output, "%s", bf);
} else if (sc->fmt->hexret)
fprintf(trace->output, "%#lx", ret);
else if (sc->fmt->errpid) {
struct thread *child = machine__find_thread(trace->host, ret, ret);
if (child != NULL) {
fprintf(trace->output, "%ld", ret);
if (thread__comm_set(child))
fprintf(trace->output, " (%s)", thread__comm_str(child));
thread__put(child);
}
} else
goto signed_print;
fputc('\n', trace->output);
/*
* For the sake of --max-events, we only count as an 'event' a
* non-filtered sys_enter + sys_exit pair and other tracepoint events.
*/
if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
interrupted = true;
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
out:
ttrace->entry_pending = false;
err = 0;
out_put:
thread__put(thread);
return err;
}
static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
struct thread_trace *ttrace;
size_t filename_len, entry_str_len, to_move;
ssize_t remaining_space;
char *pos;
const char *filename = evsel__rawptr(evsel, sample, "pathname");
if (!thread)
goto out;
ttrace = thread__priv(thread);
if (!ttrace)
goto out_put;
filename_len = strlen(filename);
if (filename_len == 0)
goto out_put;
if (ttrace->filename.namelen < filename_len) {
char *f = realloc(ttrace->filename.name, filename_len + 1);
if (f == NULL)
goto out_put;
ttrace->filename.namelen = filename_len;
ttrace->filename.name = f;
}
strcpy(ttrace->filename.name, filename);
ttrace->filename.pending_open = true;
if (!ttrace->filename.ptr)
goto out_put;
entry_str_len = strlen(ttrace->entry_str);
remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
if (remaining_space <= 0)
goto out_put;
if (filename_len > (size_t)remaining_space) {
filename += filename_len - remaining_space;
filename_len = remaining_space;
}
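/* Splice the (possibly truncated) filename into the pending entry string at the position recorded at syscall entry. */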
to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
memmove(pos + filename_len, pos, to_move);
memcpy(pos, filename, filename_len);
ttrace->filename.ptr = 0;
ttrace->filename.entry_str_pos = 0;
out_put:
thread__put(thread);
out:
return 0;
}
static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
u64 runtime = evsel__intval(evsel, sample, "runtime");
double runtime_ms = (double)runtime / NSEC_PER_MSEC;
struct thread *thread = machine__findnew_thread(trace->host,
sample->pid,
sample->tid);
struct thread_trace *ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
goto out_dump;
ttrace->runtime_ms += runtime_ms;
trace->runtime_ms += runtime_ms;
out_put:
thread__put(thread);
return 0;
out_dump:
fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
evsel->name,
evsel__strval(evsel, sample, "comm"),
(pid_t)evsel__intval(evsel, sample, "pid"),
runtime,
evsel__intval(evsel, sample, "vruntime"));
goto out_put;
}
static int bpf_output__printer(enum binary_printer_ops op,
unsigned int val, void *extra __maybe_unused, FILE *fp)
{
unsigned char ch = (unsigned char)val;
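/* Emit only the character data column, rendering non-printable bytes as '.'. */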
switch (op) {
case BINARY_PRINT_CHAR_DATA:
return fprintf(fp, "%c", isprint(ch) ? ch : '.');
case BINARY_PRINT_DATA_BEGIN:
case BINARY_PRINT_LINE_BEGIN:
case BINARY_PRINT_ADDR:
case BINARY_PRINT_NUM_DATA:
case BINARY_PRINT_NUM_PAD:
case BINARY_PRINT_SEP:
case BINARY_PRINT_CHAR_PAD:
case BINARY_PRINT_LINE_END:
case BINARY_PRINT_DATA_END:
default:
break;
}
return 0;
}
static void bpf_output__fprintf(struct trace *trace,
struct perf_sample *sample)
{
binary__fprintf(sample->raw_data, sample->raw_size, 8,
bpf_output__printer, NULL, trace->output);
++trace->nr_events_printed;
}
static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
struct thread *thread, void *augmented_args, int augmented_args_size)
{
char bf[2048];
size_t size = sizeof(bf);
struct tep_format_field *field = evsel->tp_format->format.fields;
struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
size_t printed = 0;
unsigned long val;
u8 bit = 1;
struct syscall_arg syscall_arg = {
.augmented = {
.size = augmented_args_size,
.args = augmented_args,
},
.idx = 0,
.mask = 0,
.trace = trace,
.thread = thread,
.show_string_prefix = trace->show_string_prefix,
};
for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
if (syscall_arg.mask & bit)
continue;
syscall_arg.len = 0;
syscall_arg.fmt = arg;
if (field->flags & TEP_FIELD_IS_ARRAY) {
int offset = field->offset;
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
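/* Dynamic (__data_loc) fields encode the length in the high 16 bits and the offset in the low 16 bits of a 32-bit value. */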
offset = format_field__intval(field, sample, evsel->needs_swap);
syscall_arg.len = offset >> 16;
offset &= 0xffff;
if (tep_field_is_relative(field->flags))
offset += field->offset + field->size;
}
val = (uintptr_t)(sample->raw_data + offset);
} else
val = format_field__intval(field, sample, evsel->needs_swap);
/*
* Some syscall args need some mask, most don't and
* return val untouched.
*/
val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
/*
* Suppress this argument if its value is zero and we don't
* have a string associated with it in a strarray.
*/
if (val == 0 &&
!trace->show_zeros &&
!((arg->show_zero ||
arg->scnprintf == SCA_STRARRAY ||
arg->scnprintf == SCA_STRARRAYS) &&
arg->parm))
continue;
printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
if (trace->show_arg_names)
printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
}
return printed + fprintf(trace->output, "%s", bf);
}
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
struct thread *thread;
int callchain_ret = 0;
/*
* Check if we called perf_evsel__disable(evsel) due to, for instance,
* this event's max_events having been hit and this is an entry coming
* from the ring buffer that we should discard, since the max events
* have already been considered/printed.
*/
if (evsel->disabled)
return 0;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
if (sample->callchain) {
struct callchain_cursor *cursor = get_tls_callchain_cursor();
callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
if (callchain_ret == 0) {
if (cursor->nr < trace->min_stack)
goto out;
callchain_ret = 1;
}
}
trace__printf_interrupted_entry(trace);
trace__fprintf_tstamp(trace, sample->time, trace->output);
if (trace->trace_syscalls && trace->show_duration)
fprintf(trace->output, "( ): ");
if (thread)
trace__fprintf_comm_tid(trace, thread, trace->output);
if (evsel == trace->syscalls.events.bpf_output) {
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
struct syscall *sc = trace__syscall_info(trace, evsel, id);
if (sc) {
fprintf(trace->output, "%s(", sc->name);
trace__fprintf_sys_enter(trace, evsel, sample);
fputc(')', trace->output);
goto newline;
}
/*
* XXX: Not having the associated syscall info or not finding/adding
* the thread should never happen, but if it does...
* fall thru and print it as a bpf_output event.
*/
}
fprintf(trace->output, "%s(", evsel->name);
if (evsel__is_bpf_output(evsel)) {
bpf_output__fprintf(trace, sample);
} else if (evsel->tp_format) {
if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
trace__fprintf_sys_enter(trace, evsel, sample)) {
if (trace->libtraceevent_print) {
event_format__fprintf(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size,
trace->output);
} else {
trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
}
}
}
newline:
fprintf(trace->output, ")\n");
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
++trace->nr_events_printed;
if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
evsel__disable(evsel);
evsel__close(evsel);
}
out:
thread__put(thread);
return 0;
}
static void print_location(FILE *f, struct perf_sample *sample,
struct addr_location *al,
bool print_dso, bool print_sym)
{
if ((verbose > 0 || print_dso) && al->map)
fprintf(f, "%s@", map__dso(al->map)->long_name);
if ((verbose > 0 || print_sym) && al->sym)
fprintf(f, "%s+0x%" PRIx64, al->sym->name,
al->addr - al->sym->start);
else if (al->map)
fprintf(f, "0x%" PRIx64, al->addr);
else
fprintf(f, "0x%" PRIx64, sample->addr);
}
static int trace__pgfault(struct trace *trace,
struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
struct thread *thread;
struct addr_location al;
char map_type = 'd';
struct thread_trace *ttrace;
int err = -1;
int callchain_ret = 0;
addr_location__init(&al);
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
if (sample->callchain) {
struct callchain_cursor *cursor = get_tls_callchain_cursor();
callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
if (callchain_ret == 0) {
if (cursor->nr < trace->min_stack)
goto out_put;
callchain_ret = 1;
}
}
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
goto out_put;
if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
ttrace->pfmaj++;
else
ttrace->pfmin++;
if (trace->summary_only)
goto out;
thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
fprintf(trace->output, "%sfault [",
evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
"maj" : "min");
print_location(trace->output, sample, &al, false, true);
fprintf(trace->output, "] => ");
thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
if (!al.map) {
thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
if (al.map)
map_type = 'x';
else
map_type = '?';
}
print_location(trace->output, sample, &al, true, false);
fprintf(trace->output, " (%c%c)\n", map_type, al.level);
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
++trace->nr_events_printed;
out:
err = 0;
out_put:
thread__put(thread);
addr_location__exit(&al);
return err;
}
static void trace__set_base_time(struct trace *trace,
struct evsel *evsel,
struct perf_sample *sample)
{
/*
* BPF events were not setting PERF_SAMPLE_TIME, so be more robust
* and don't use sample->time unconditionally, we may end up having
* some other event in the future without PERF_SAMPLE_TIME for good
* reason, i.e. we may not be interested in its timestamps, just in
* it taking place, picking some piece of information when it
* appears in our event stream (vfs_getname comes to mind).
*/
if (trace->base_time == 0 && !trace->full_time &&
(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
trace->base_time = sample->time;
}
static int trace__process_sample(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine __maybe_unused)
{
struct trace *trace = container_of(tool, struct trace, tool);
struct thread *thread;
int err = 0;
tracepoint_handler handler = evsel->handler;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
if (thread && thread__is_filtered(thread))
goto out;
trace__set_base_time(trace, evsel, sample);
if (handler) {
++trace->nr_events;
handler(trace, evsel, event, sample);
}
out:
thread__put(thread);
return err;
}
static int trace__record(struct trace *trace, int argc, const char **argv)
{
unsigned int rec_argc, i, j;
const char **rec_argv;
const char * const record_args[] = {
"record",
"-R",
"-m", "1024",
"-c", "1",
};
pid_t pid = getpid();
char *filter = asprintf__tp_filter_pids(1, &pid);
const char * const sc_args[] = { "-e", };
unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
const char * const majpf_args[] = { "-e", "major-faults" };
unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
const char * const minpf_args[] = { "-e", "minor-faults" };
unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
int err = -1;
/* +3 is for the event string below and the pid filter */
rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
majpf_args_nr + minpf_args_nr + argc;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL || filter == NULL)
goto out_free;
j = 0;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[j++] = record_args[i];
if (trace->trace_syscalls) {
for (i = 0; i < sc_args_nr; i++)
rec_argv[j++] = sc_args[i];
/* event string may be different for older kernels - e.g., RHEL6 */
if (is_valid_tracepoint("raw_syscalls:sys_enter"))
rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
else if (is_valid_tracepoint("syscalls:sys_enter"))
rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
else {
pr_err("Neither raw_syscalls nor syscalls events exist.\n");
goto out_free;
}
}
rec_argv[j++] = "--filter";
rec_argv[j++] = filter;
if (trace->trace_pgfaults & TRACE_PFMAJ)
for (i = 0; i < majpf_args_nr; i++)
rec_argv[j++] = majpf_args[i];
if (trace->trace_pgfaults & TRACE_PFMIN)
for (i = 0; i < minpf_args_nr; i++)
rec_argv[j++] = minpf_args[i];
for (i = 0; i < (unsigned int)argc; i++)
rec_argv[j++] = argv[i];
err = cmd_record(j, rec_argv);
out_free:
free(filter);
free(rec_argv);
return err;
}
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
static bool evlist__add_vfs_getname(struct evlist *evlist)
{
bool found = false;
struct evsel *evsel, *tmp;
struct parse_events_error err;
int ret;
parse_events_error__init(&err);
ret = parse_events(evlist, "probe:vfs_getname*", &err);
parse_events_error__exit(&err);
if (ret)
return false;
evlist__for_each_entry_safe(evlist, evsel, tmp) {
if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
continue;
if (evsel__field(evsel, "pathname")) {
evsel->handler = trace__vfs_getname;
found = true;
continue;
}
list_del_init(&evsel->core.node);
evsel->evlist = NULL;
evsel__delete(evsel);
}
return found;
}
static struct evsel *evsel__new_pgfault(u64 config)
{
struct evsel *evsel;
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.mmap_data = 1,
};
attr.config = config;
attr.sample_period = 1;
event_attr_init(&attr);
evsel = evsel__new(&attr);
if (evsel)
evsel->handler = trace__pgfault;
return evsel;
}
static void evlist__free_syscall_tp_fields(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
evsel_trace__delete(evsel->priv);
evsel->priv = NULL;
}
}
static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
{
const u32 type = event->header.type;
struct evsel *evsel;
if (type != PERF_RECORD_SAMPLE) {
trace__process_event(trace, trace->host, event, sample);
return;
}
evsel = evlist__id2evsel(trace->evlist, sample->id);
if (evsel == NULL) {
fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
return;
}
if (evswitch__discard(&trace->evswitch, evsel))
return;
trace__set_base_time(trace, evsel, sample);
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
sample->raw_data == NULL) {
fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
evsel__name(evsel), sample->tid,
sample->cpu, sample->raw_size);
} else {
tracepoint_handler handler = evsel->handler;
handler(trace, evsel, event, sample);
}
if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
interrupted = true;
}
static int trace__add_syscall_newtp(struct trace *trace)
{
int ret = -1;
struct evlist *evlist = trace->evlist;
struct evsel *sys_enter, *sys_exit;
sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
if (sys_enter == NULL)
goto out;
if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
goto out_delete_sys_enter;
sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
if (sys_exit == NULL)
goto out_delete_sys_enter;
if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
goto out_delete_sys_exit;
evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
evlist__add(evlist, sys_enter);
evlist__add(evlist, sys_exit);
if (callchain_param.enabled && !trace->kernel_syscallchains) {
/*
* We're interested only in the user space callchain
* leading to the syscall, allow overriding that for
* debugging reasons using --kernel_syscall_callchains.
*/
sys_exit->core.attr.exclude_callchain_kernel = 1;
}
trace->syscalls.events.sys_enter = sys_enter;
trace->syscalls.events.sys_exit = sys_exit;
ret = 0;
out:
return ret;
out_delete_sys_exit:
evsel__delete_priv(sys_exit);
out_delete_sys_enter:
evsel__delete_priv(sys_enter);
goto out;
}
static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
{
int err = -1;
struct evsel *sys_exit;
char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
trace->ev_qualifier_ids.nr,
trace->ev_qualifier_ids.entries);
if (filter == NULL)
goto out_enomem;
if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
sys_exit = trace->syscalls.events.sys_exit;
err = evsel__append_tp_filter(sys_exit, filter);
}
free(filter);
out:
return err;
out_enomem:
errno = ENOMEM;
goto out;
}
#ifdef HAVE_BPF_SKEL
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
{
struct bpf_program *pos, *prog = NULL;
const char *sec_name;
if (trace->skel->obj == NULL)
return NULL;
bpf_object__for_each_program(pos, trace->skel->obj) {
sec_name = bpf_program__section_name(pos);
if (sec_name && !strcmp(sec_name, name)) {
prog = pos;
break;
}
}
return prog;
}
static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
const char *prog_name, const char *type)
{
struct bpf_program *prog;
if (prog_name == NULL) {
char default_prog_name[256];
scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
prog = trace__find_bpf_program_by_title(trace, default_prog_name);
if (prog != NULL)
goto out_found;
if (sc->fmt && sc->fmt->alias) {
scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
prog = trace__find_bpf_program_by_title(trace, default_prog_name);
if (prog != NULL)
goto out_found;
}
goto out_unaugmented;
}
prog = trace__find_bpf_program_by_title(trace, prog_name);
if (prog != NULL) {
out_found:
return prog;
}
pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
prog_name, type, sc->name);
out_unaugmented:
return trace->skel->progs.syscall_unaugmented;
}
static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
{
struct syscall *sc = trace__syscall_info(trace, NULL, id);
if (sc == NULL)
return;
sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
}
static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
{
struct syscall *sc = trace__syscall_info(trace, NULL, id);
return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}
static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
{
struct syscall *sc = trace__syscall_info(trace, NULL, id);
return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
struct tep_format_field *field, *candidate_field;
int id;
/*
* We're only interested in syscalls that have a pointer:
*/
for (field = sc->args; field; field = field->next) {
if (field->flags & TEP_FIELD_IS_POINTER)
goto try_to_find_pair;
}
return NULL;
try_to_find_pair:
for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
struct syscall *pair = trace__syscall_info(trace, NULL, id);
struct bpf_program *pair_prog;
bool is_candidate = false;
if (pair == NULL || pair == sc ||
pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
continue;
for (field = sc->args, candidate_field = pair->args;
field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
if (is_pointer) {
if (!candidate_is_pointer) {
// The candidate just doesn't copy our pointer arg, but it might copy other pointers we want.
continue;
}
} else {
if (candidate_is_pointer) {
// The candidate might copy a pointer we don't have, skip it.
goto next_candidate;
}
continue;
}
if (strcmp(field->type, candidate_field->type))
goto next_candidate;
/*
* This is limited in the BPF program but sys_write
* uses "const char *" for its "buf" arg so we need to
* use some heuristic that is kinda future proof...
*/
if (strcmp(field->type, "const char *") == 0 &&
!(strstr(field->name, "name") ||
strstr(field->name, "path") ||
strstr(field->name, "file") ||
strstr(field->name, "root") ||
strstr(field->name, "description")))
goto next_candidate;
is_candidate = true;
}
if (!is_candidate)
goto next_candidate;
/*
* Check if the tentative pair syscall augmenter has more pointers; if it
* has, it may be collecting those too, and then we can't use it, as it
* would collect more than what is common to the two syscalls.
*/
if (candidate_field) {
for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
if (candidate_field->flags & TEP_FIELD_IS_POINTER)
goto next_candidate;
}
pair_prog = pair->bpf_prog.sys_enter;
/*
* If the pair isn't enabled, then its bpf_prog.sys_enter will not
* have been searched for, so search it here and if it returns the
* unaugmented one, then ignore it, otherwise we'll reuse that BPF
* program for a filtered syscall on a non-filtered one.
*
* For instance, we have "!syscalls:sys_enter_renameat" and that is
* useful for "renameat2".
*/
if (pair_prog == NULL) {
pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
if (pair_prog == trace->skel->progs.syscall_unaugmented)
goto next_candidate;
}
pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
return pair_prog;
next_candidate:
continue;
}
return NULL;
}
static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
int err = 0, key;
for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
int prog_fd;
if (!trace__syscall_enabled(trace, key))
continue;
trace__init_syscall_bpf_progs(trace, key);
// It'll get at least the "!raw_syscalls:unaugmented" fallback program
prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
if (err)
break;
prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
if (err)
break;
}
/*
* Now let's do a second pass looking for enabled syscalls without
* an augmenter that have a signature that is a superset of another
* syscall with an augmenter so that we can auto-reuse it.
*
* I.e. if we have an augmenter for the "open" syscall that has
* this signature:
*
* int open(const char *pathname, int flags, mode_t mode);
*
* I.e. that will collect just the first string argument, then we
* can reuse it for the 'creat' syscall, that has this signature:
*
* int creat(const char *pathname, mode_t mode);
*
* and for:
*
* int stat(const char *pathname, struct stat *statbuf);
* int lstat(const char *pathname, struct stat *statbuf);
*
* Because the 'open' augmenter will collect the first arg as a string,
* and leave alone all the other args, which already helps with
* beautifying 'stat' and 'lstat''s pathname arg.
*
* Then, in time, when 'stat' gets an augmenter that collects both
* first and second arg (this one on the raw_syscalls:sys_exit prog
* array tail call), then that one will be used.
*/
for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
struct syscall *sc = trace__syscall_info(trace, NULL, key);
struct bpf_program *pair_prog;
int prog_fd;
if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
continue;
/*
* For now we're just reusing the sys_enter prog, and if it
* already has an augmenter, we don't need to find one.
*/
if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
continue;
/*
* Look at all the other syscalls for one that has a signature
* that is close enough that we can share:
*/
pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
if (pair_prog == NULL)
continue;
sc->bpf_prog.sys_enter = pair_prog;
/*
* Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
* with the fd for the program we're reusing:
*/
prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
if (err)
break;
}
return err;
}
#endif // HAVE_BPF_SKEL
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
if (trace->syscalls.events.sys_enter)
return trace__set_ev_qualifier_tp_filter(trace);
return 0;
}
static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
size_t npids __maybe_unused, pid_t *pids __maybe_unused)
{
int err = 0;
#ifdef HAVE_LIBBPF_SUPPORT
bool value = true;
int map_fd = bpf_map__fd(map);
size_t i;
for (i = 0; i < npids; ++i) {
err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
if (err)
break;
}
#endif
return err;
}
static int trace__set_filter_loop_pids(struct trace *trace)
{
unsigned int nr = 1, err;
pid_t pids[32] = {
getpid(),
};
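/*
* Walk up our parent chain looking for the session leader (sshd,
* gnome-terminal) and filter it too: tracing the process that displays
* our own output would otherwise generate a feedback loop of events.
*/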
struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
while (thread && nr < ARRAY_SIZE(pids)) {
struct thread *parent = machine__find_thread(trace->host,
thread__ppid(thread),
thread__ppid(thread));
if (parent == NULL)
break;
if (!strcmp(thread__comm_str(parent), "sshd") ||
strstarts(thread__comm_str(parent), "gnome-terminal")) {
pids[nr++] = thread__tid(parent);
break;
}
thread = parent;
}
err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
if (!err && trace->filter_pids.map)
err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
return err;
}
static int trace__set_filter_pids(struct trace *trace)
{
int err = 0;
/*
* Better not use !target__has_task() here because we need to cover the
* case where no threads were specified in the command line, but a
* workload was, and in that case we will fill in the thread_map when
* we fork the workload in evlist__prepare_workload.
*/
if (trace->filter_pids.nr > 0) {
err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
trace->filter_pids.entries);
if (!err && trace->filter_pids.map) {
err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
trace->filter_pids.entries);
}
} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
err = trace__set_filter_loop_pids(trace);
}
return err;
}
static int __trace__deliver_event(struct trace *trace, union perf_event *event)
{
struct evlist *evlist = trace->evlist;
struct perf_sample sample;
int err = evlist__parse_sample(evlist, event, &sample);
if (err)
fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
else
trace__handle_event(trace, event, &sample);
return 0;
}
static int __trace__flush_events(struct trace *trace)
{
u64 first = ordered_events__first_time(&trace->oe.data);
u64 flush = trace->oe.last - NSEC_PER_SEC;
/* Is there something to flush? */
if (first && first < flush)
return ordered_events__flush_time(&trace->oe.data, flush);
return 0;
}
static int trace__flush_events(struct trace *trace)
{
return !trace->sort_events ? 0 : __trace__flush_events(trace);
}
static int trace__deliver_event(struct trace *trace, union perf_event *event)
{
int err;
if (!trace->sort_events)
return __trace__deliver_event(trace, event);
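/* Sorting: queue the event ordered by timestamp, then flush whatever is more than a second older than the last queued event. */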
err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
if (err && err != -1)
return err;
err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
if (err)
return err;
return trace__flush_events(trace);
}
static int ordered_events__deliver_event(struct ordered_events *oe,
struct ordered_event *event)
{
struct trace *trace = container_of(oe, struct trace, oe.data);
return __trace__deliver_event(trace, event->event);
}
static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
{
struct tep_format_field *field;
struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
if (evsel->tp_format == NULL || fmt == NULL)
return NULL;
for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
if (strcmp(field->name, arg) == 0)
return fmt;
return NULL;
}
static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
{
char *tok, *left = evsel->filter, *new_filter = evsel->filter;
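/*
* Scan the filter expression for comparisons against symbolic values
* and, using the argument's ->strtoul() resolver, rewrite them as
* numbers the kernel tracepoint filter can evaluate.
*/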
while ((tok = strpbrk(left, "=<>!")) != NULL) {
char *right = tok + 1, *right_end;
if (*right == '=')
++right;
while (isspace(*right))
++right;
if (*right == '\0')
break;
while (!isalpha(*left))
if (++left == tok) {
/*
* Bail out: we can't find the name of the argument that is being
* used in the filter; let the filter be set as-is, it will fail later.
*/
return 0;
}
right_end = right + 1;
while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
++right_end;
if (isalpha(*right)) {
struct syscall_arg_fmt *fmt;
int left_size = tok - left,
right_size = right_end - right;
char arg[128];
while (isspace(left[left_size - 1]))
--left_size;
scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
if (fmt == NULL) {
pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
arg, evsel->name, evsel->filter);
return -1;
}
pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
arg, (int)(right - tok), tok, right_size, right);
if (fmt->strtoul) {
u64 val;
struct syscall_arg syscall_arg = {
.parm = fmt->parm,
};
if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
char *n, expansion[19];
int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
int expansion_offset = right - new_filter;
pr_debug("%s", expansion);
if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
pr_debug(" out of memory!\n");
free(new_filter);
return -1;
}
if (new_filter != evsel->filter)
free(new_filter);
left = n + expansion_offset + expansion_length;
new_filter = n;
} else {
pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
right_size, right, arg, evsel->name, evsel->filter);
return -1;
}
} else {
pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
arg, evsel->name, evsel->filter);
return -1;
}
pr_debug("\n");
} else {
left = right_end;
}
}
if (new_filter != evsel->filter) {
pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
evsel__set_filter(evsel, new_filter);
free(new_filter);
}
return 0;
}
static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
{
struct evlist *evlist = trace->evlist;
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->filter == NULL)
continue;
if (trace__expand_filter(trace, evsel)) {
*err_evsel = evsel;
return -1;
}
}
return 0;
}
static int trace__run(struct trace *trace, int argc, const char **argv)
{
struct evlist *evlist = trace->evlist;
struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
int err = -1, i;
unsigned long before;
const bool forks = argc > 0;
bool draining = false;
trace->live = true;
if (!trace->raw_augmented_syscalls) {
if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
goto out_error_raw_syscalls;
if (trace->trace_syscalls)
trace->vfs_getname = evlist__add_vfs_getname(evlist);
}
if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
if (pgfault_maj == NULL)
goto out_error_mem;
evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
evlist__add(evlist, pgfault_maj);
}
if ((trace->trace_pgfaults & TRACE_PFMIN)) {
pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
if (pgfault_min == NULL)
goto out_error_mem;
evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
evlist__add(evlist, pgfault_min);
}
/* Enable ignoring missing threads when -u/-p option is defined. */
trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
if (trace->sched &&
evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
goto out_error_sched_stat_runtime;
/*
* If a global cgroup was set, apply it to all the events without an
* explicit cgroup. I.e.:
*
* trace -G A -e sched:*switch
*
* Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
* _and_ sched:sched_switch to the 'A' cgroup, while:
*
* trace -e sched:*switch -G A
*
* will only set the sched:sched_switch event to the 'A' cgroup, all the
* other events (raw_syscalls:sys_{enter,exit}, etc are left "without"
* a cgroup (on the root cgroup, sys wide, etc).
*
* Multiple cgroups:
*
* trace -G A -e sched:*switch -G B
*
* the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
* to the 'B' cgroup.
*
* evlist__set_default_cgroup() grabs a reference of the passed cgroup
* only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
*/
if (trace->cgroup)
evlist__set_default_cgroup(trace->evlist, trace->cgroup);
err = evlist__create_maps(evlist, &trace->opts.target);
if (err < 0) {
fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
goto out_delete_evlist;
}
err = trace__symbols_init(trace, evlist);
if (err < 0) {
fprintf(trace->output, "Problems initializing symbol libraries!\n");
goto out_delete_evlist;
}
evlist__config(evlist, &trace->opts, &callchain_param);
if (forks) {
err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
if (err < 0) {
fprintf(trace->output, "Couldn't run the workload!\n");
goto out_delete_evlist;
}
workload_pid = evlist->workload.pid;
}
err = evlist__open(evlist);
if (err < 0)
goto out_error_open;
#ifdef HAVE_BPF_SKEL
if (trace->syscalls.events.bpf_output) {
struct perf_cpu cpu;
/*
* Set up the __augmented_syscalls__ BPF map to hold for each
* CPU the bpf-output event's file descriptor.
*/
perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
&cpu.cpu, sizeof(int),
xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
cpu.cpu, 0),
sizeof(__u32), BPF_ANY);
}
}
#endif
err = trace__set_filter_pids(trace);
if (err < 0)
goto out_error_mem;
#ifdef HAVE_BPF_SKEL
if (trace->skel && trace->skel->progs.sys_enter)
trace__init_syscalls_bpf_prog_array_maps(trace);
#endif
if (trace->ev_qualifier_ids.nr > 0) {
err = trace__set_ev_qualifier_filter(trace);
if (err < 0)
goto out_errno;
if (trace->syscalls.events.sys_exit) {
pr_debug("event qualifier tracepoint filter: %s\n",
trace->syscalls.events.sys_exit->filter);
}
}
/*
* If the "close" syscall is not traced, then we will not have the
* opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the
* fd->pathname table and were ending up showing the last value set by
* syscalls opening a pathname and associating it with a descriptor or
* reading it from /proc/pid/fd/ in cases where that doesn't make
* sense.
*
* So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
* not in use.
*/
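/*
 * Hypothetical example of the failure mode described above: after
 * open("/etc/passwd") = 3 and close(3), a new descriptor 3 from an
 * unrelated open could still be shown as "/etc/passwd" if the close
 * beautifier never got the chance to drop the stale entry.
 */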
trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
err = trace__expand_filters(trace, &evsel);
if (err)
goto out_delete_evlist;
err = evlist__apply_filters(evlist, &evsel);
if (err < 0)
goto out_error_apply_filters;
err = evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;
if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
evlist__enable(evlist);
if (forks)
evlist__start_workload(evlist);
if (trace->opts.target.initial_delay) {
usleep(trace->opts.target.initial_delay * 1000);
evlist__enable(evlist);
}
trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
perf_thread_map__nr(evlist->core.threads) > 1 ||
evlist__first(evlist)->core.attr.inherit;
/*
* Now that we already used evsel->core.attr to ask the kernel to setup the
* events, lets reuse evsel->core.attr.sample_max_stack as the limit in
* trace__resolve_callchain(), allowing per-event max-stack settings
* to override an explicitly set --max-stack global setting.
*/
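/*
 * Illustrative invocation (an assumed example using the usual perf
 * per-event term syntax, not taken from this file):
 * 'perf trace --max-stack 8 -e probe:vfs_getname/max-stack=16/' keeps 8
 * as the global limit while letting that one event resolve up to 16
 * frames.
 */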
evlist__for_each_entry(evlist, evsel) {
if (evsel__has_callchain(evsel) &&
evsel->core.attr.sample_max_stack == 0)
evsel->core.attr.sample_max_stack = trace->max_stack;
}
again:
before = trace->nr_events;
for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
struct mmap *md;
md = &evlist->mmap[i];
if (perf_mmap__read_init(&md->core) < 0)
continue;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
++trace->nr_events;
err = trace__deliver_event(trace, event);
if (err)
goto out_disable;
perf_mmap__consume(&md->core);
if (interrupted)
goto out_disable;
if (done && !draining) {
evlist__disable(evlist);
draining = true;
}
}
perf_mmap__read_done(&md->core);
}
if (trace->nr_events == before) {
int timeout = done ? 100 : -1;
if (!draining && evlist__poll(evlist, timeout) > 0) {
if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
draining = true;
goto again;
} else {
if (trace__flush_events(trace))
goto out_disable;
}
} else {
goto again;
}
out_disable:
thread__zput(trace->current);
evlist__disable(evlist);
if (trace->sort_events)
ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
if (!err) {
if (trace->summary)
trace__fprintf_thread_summary(trace, trace->output);
if (trace->show_tool_stats) {
fprintf(trace->output, "Stats:\n "
" vfs_getname : %" PRIu64 "\n"
" proc_getname: %" PRIu64 "\n",
trace->stats.vfs_getname,
trace->stats.proc_getname);
}
}
out_delete_evlist:
trace__symbols__exit(trace);
evlist__free_syscall_tp_fields(evlist);
evlist__delete(evlist);
cgroup__put(trace->cgroup);
trace->evlist = NULL;
trace->live = false;
return err;
{
char errbuf[BUFSIZ];
out_error_sched_stat_runtime:
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
goto out_error;
out_error_raw_syscalls:
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
goto out_error;
out_error_mmap:
evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
goto out_error;
out_error_open:
evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
out_error:
fprintf(trace->output, "%s\n", errbuf);
goto out_delete_evlist;
out_error_apply_filters:
fprintf(trace->output,
"Failed to set filter \"%s\" on event %s with %d (%s)\n",
evsel->filter, evsel__name(evsel), errno,
str_error_r(errno, errbuf, sizeof(errbuf)));
goto out_delete_evlist;
}
out_error_mem:
fprintf(trace->output, "Not enough memory to run!\n");
goto out_delete_evlist;
out_errno:
fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
goto out_delete_evlist;
}
static int trace__replay(struct trace *trace)
{
const struct evsel_str_handler handlers[] = {
{ "probe:vfs_getname", trace__vfs_getname, },
};
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = trace->force,
};
struct perf_session *session;
struct evsel *evsel;
int err = -1;
trace->tool.sample = trace__process_sample;
trace->tool.mmap = perf_event__process_mmap;
trace->tool.mmap2 = perf_event__process_mmap2;
trace->tool.comm = perf_event__process_comm;
trace->tool.exit = perf_event__process_exit;
trace->tool.fork = perf_event__process_fork;
trace->tool.attr = perf_event__process_attr;
trace->tool.tracing_data = perf_event__process_tracing_data;
trace->tool.build_id = perf_event__process_build_id;
trace->tool.namespaces = perf_event__process_namespaces;
trace->tool.ordered_events = true;
trace->tool.ordering_requires_timestamps = true;
/* add tid to output */
trace->multiple_threads = true;
session = perf_session__new(&data, &trace->tool);
if (IS_ERR(session))
return PTR_ERR(session);
if (trace->opts.target.pid)
symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
if (trace->opts.target.tid)
symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
if (symbol__init(&session->header.env) < 0)
goto out;
trace->host = &session->machines.host;
err = perf_session__set_tracepoints_handlers(session, handlers);
if (err)
goto out;
evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
trace->syscalls.events.sys_enter = evsel;
/* older kernels have syscalls tp versus raw_syscalls */
if (evsel == NULL)
evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
if (evsel &&
(evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
pr_err("Error during initialize raw_syscalls:sys_enter event\n");
goto out;
}
evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
trace->syscalls.events.sys_exit = evsel;
if (evsel == NULL)
evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
if (evsel &&
(evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
pr_err("Error during initialize raw_syscalls:sys_exit event\n");
goto out;
}
evlist__for_each_entry(session->evlist, evsel) {
if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
(evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
evsel->handler = trace__pgfault;
}
setup_pager();
err = perf_session__process_events(session);
if (err)
pr_err("Failed to process events, error %d", err);
else if (trace->summary)
trace__fprintf_thread_summary(trace, trace->output);
out:
perf_session__delete(session);
return err;
}
static size_t trace__fprintf_threads_header(FILE *fp)
{
size_t printed;
printed = fprintf(fp, "\n Summary of events:\n\n");
return printed;
}
DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
struct syscall_stats *stats;
double msecs;
int syscall;
)
{
struct int_node *source = rb_entry(nd, struct int_node, rb_node);
struct syscall_stats *stats = source->priv;
entry->syscall = source->i;
entry->stats = stats;
entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
}
static size_t thread__dump_stats(struct thread_trace *ttrace,
struct trace *trace, FILE *fp)
{
size_t printed = 0;
struct syscall *sc;
struct rb_node *nd;
DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
if (syscall_stats == NULL)
return 0;
printed += fprintf(fp, "\n");
printed += fprintf(fp, " syscall calls errors total min avg max stddev\n");
printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");
resort_rb__for_each_entry(nd, syscall_stats) {
struct syscall_stats *stats = syscall_stats_entry->stats;
if (stats) {
double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
double avg = avg_stats(&stats->stats);
double pct;
u64 n = (u64)stats->stats.n;
pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
avg /= NSEC_PER_MSEC;
sc = &trace->syscalls.table[syscall_stats_entry->syscall];
printed += fprintf(fp, " %-15s", sc->name);
printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
if (trace->errno_summary && stats->nr_failures) {
const char *arch_name = perf_env__arch(trace->host->env);
int e;
for (e = 0; e < stats->max_errno; ++e) {
if (stats->errnos[e] != 0)
fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
}
}
}
}
resort_rb__delete(syscall_stats);
printed += fprintf(fp, "\n\n");
return printed;
}
static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
{
size_t printed = 0;
struct thread_trace *ttrace = thread__priv(thread);
double ratio;
if (ttrace == NULL)
return 0;
ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread));
printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
printed += fprintf(fp, "%.1f%%", ratio);
if (ttrace->pfmaj)
printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
if (ttrace->pfmin)
printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
if (trace->sched)
printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
else if (fputc('\n', fp) != EOF)
++printed;
printed += thread__dump_stats(ttrace, trace, fp);
return printed;
}
static unsigned long thread__nr_events(struct thread_trace *ttrace)
{
return ttrace ? ttrace->nr_events : 0;
}
DEFINE_RESORT_RB(threads,
(thread__nr_events(thread__priv(a->thread)) <
thread__nr_events(thread__priv(b->thread))),
struct thread *thread;
)
{
entry->thread = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
}
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
size_t printed = trace__fprintf_threads_header(fp);
struct rb_node *nd;
int i;
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
if (threads == NULL) {
fprintf(fp, "%s", "Error sorting output by nr_events!\n");
return 0;
}
resort_rb__for_each_entry(nd, threads)
printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
resort_rb__delete(threads);
}
return printed;
}
static int trace__set_duration(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct trace *trace = opt->value;
trace->duration_filter = atof(str);
return 0;
}
static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
int unset __maybe_unused)
{
int ret = -1;
size_t i;
struct trace *trace = opt->value;
/*
* FIXME: introduce an intarray class, plainly parse the csv and create a
* { int nr, int entries[] } struct...
*/
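/*
 * A minimal sketch of what that FIXME describes (not implemented here):
 *
 *	struct intarray { int nr; int entries[]; };
 */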
struct intlist *list = intlist__new(str);
if (list == NULL)
return -1;
i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
trace->filter_pids.entries = calloc(i, sizeof(pid_t));
if (trace->filter_pids.entries == NULL)
goto out;
trace->filter_pids.entries[0] = getpid();
for (i = 1; i < trace->filter_pids.nr; ++i)
trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
intlist__delete(list);
ret = 0;
out:
return ret;
}
static int trace__open_output(struct trace *trace, const char *filename)
{
struct stat st;
if (!stat(filename, &st) && st.st_size) {
char oldname[PATH_MAX];
scnprintf(oldname, sizeof(oldname), "%s.old", filename);
unlink(oldname);
rename(filename, oldname);
}
trace->output = fopen(filename, "w");
return trace->output == NULL ? -errno : 0;
}
static int parse_pagefaults(const struct option *opt, const char *str,
int unset __maybe_unused)
{
int *trace_pgfaults = opt->value;
if (strcmp(str, "all") == 0)
*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
else if (strcmp(str, "maj") == 0)
*trace_pgfaults |= TRACE_PFMAJ;
else if (strcmp(str, "min") == 0)
*trace_pgfaults |= TRACE_PFMIN;
else
return -1;
return 0;
}
static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->handler == NULL)
evsel->handler = handler;
}
}
static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
{
struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
if (fmt) {
const struct syscall_fmt *scfmt = syscall_fmt__find(name);
if (scfmt) {
int skip = 0;
if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
++skip;
memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
}
}
}
static int evlist__set_syscall_tp_fields(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->priv || !evsel->tp_format)
continue;
if (strcmp(evsel->tp_format->system, "syscalls")) {
evsel__init_tp_arg_scnprintf(evsel);
continue;
}
if (evsel__init_syscall_tp(evsel))
return -1;
if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
struct syscall_tp *sc = __evsel__syscall_tp(evsel);
if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
return -1;
evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
struct syscall_tp *sc = __evsel__syscall_tp(evsel);
if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
return -1;
evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
}
}
return 0;
}
/*
* XXX: Hackish, just splitting the combined -e/--event list (syscalls, i.e.
* raw_syscalls:sys_{enter,exit}, plus events: tracepoints, HW, SW, etc) to use
* existing facilities unchanged (trace->ev_qualifier + parse_options()).
*
* It'd be better to introduce a parse_options() variant that would return a
* list with the terms it didn't match to an event...
*/
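/*
 * Illustrative split (an assumed example, matching the lists[] handling
 * below): 'perf trace -e openat,sched:sched_switch' puts "openat" in the
 * syscall list (trace->ev_qualifier) and hands "sched:sched_switch" to
 * parse_events_option().
 */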
static int trace__parse_events_option(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct trace *trace = (struct trace *)opt->value;
const char *s = str;
char *sep = NULL, *lists[2] = { NULL, NULL, };
int len = strlen(str) + 1, err = -1, list, idx;
char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
char group_name[PATH_MAX];
const struct syscall_fmt *fmt;
if (strace_groups_dir == NULL)
return -1;
if (*s == '!') {
++s;
trace->not_ev_qualifier = true;
}
while (1) {
if ((sep = strchr(s, ',')) != NULL)
*sep = '\0';
list = 0;
if (syscalltbl__id(trace->sctbl, s) >= 0 ||
syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
list = 1;
goto do_concat;
}
fmt = syscall_fmt__find_by_alias(s);
if (fmt != NULL) {
list = 1;
s = fmt->name;
} else {
path__join(group_name, sizeof(group_name), strace_groups_dir, s);
if (access(group_name, R_OK) == 0)
list = 1;
}
do_concat:
if (lists[list]) {
sprintf(lists[list] + strlen(lists[list]), ",%s", s);
} else {
lists[list] = malloc(len);
if (lists[list] == NULL)
goto out;
strcpy(lists[list], s);
}
if (!sep)
break;
*sep = ',';
s = sep + 1;
}
if (lists[1] != NULL) {
struct strlist_config slist_config = {
.dirname = strace_groups_dir,
};
trace->ev_qualifier = strlist__new(lists[1], &slist_config);
if (trace->ev_qualifier == NULL) {
fputs("Not enough memory to parse event qualifier", trace->output);
goto out;
}
if (trace__validate_ev_qualifier(trace))
goto out;
trace->trace_syscalls = true;
}
err = 0;
if (lists[0]) {
struct parse_events_option_args parse_events_option_args = {
.evlistp = &trace->evlist,
};
struct option o = {
.value = &parse_events_option_args,
};
err = parse_events_option(&o, lists[0], 0);
}
out:
free(strace_groups_dir);
free(lists[0]);
free(lists[1]);
if (sep)
*sep = ',';
return err;
}
static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
{
struct trace *trace = opt->value;
if (!list_empty(&trace->evlist->core.entries)) {
struct option o = {
.value = &trace->evlist,
};
return parse_cgroups(&o, str, unset);
}
trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
return 0;
}
static int trace__config(const char *var, const char *value, void *arg)
{
struct trace *trace = arg;
int err = 0;
if (!strcmp(var, "trace.add_events")) {
trace->perfconfig_events = strdup(value);
if (trace->perfconfig_events == NULL) {
pr_err("Not enough memory for %s\n", "trace.add_events");
return -1;
}
} else if (!strcmp(var, "trace.show_timestamp")) {
trace->show_tstamp = perf_config_bool(var, value);
} else if (!strcmp(var, "trace.show_duration")) {
trace->show_duration = perf_config_bool(var, value);
} else if (!strcmp(var, "trace.show_arg_names")) {
trace->show_arg_names = perf_config_bool(var, value);
if (!trace->show_arg_names)
trace->show_zeros = true;
} else if (!strcmp(var, "trace.show_zeros")) {
bool new_show_zeros = perf_config_bool(var, value);
if (!trace->show_arg_names && !new_show_zeros) {
pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
goto out;
}
trace->show_zeros = new_show_zeros;
} else if (!strcmp(var, "trace.show_prefix")) {
trace->show_string_prefix = perf_config_bool(var, value);
} else if (!strcmp(var, "trace.no_inherit")) {
trace->opts.no_inherit = perf_config_bool(var, value);
} else if (!strcmp(var, "trace.args_alignment")) {
int args_alignment = 0;
if (perf_config_int(&args_alignment, var, value) == 0)
trace->args_alignment = args_alignment;
} else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
if (strcasecmp(value, "libtraceevent") == 0)
trace->libtraceevent_print = true;
else if (strcasecmp(value, "libbeauty") == 0)
trace->libtraceevent_print = false;
}
out:
return err;
}
static void trace__exit(struct trace *trace)
{
int i;
strlist__delete(trace->ev_qualifier);
zfree(&trace->ev_qualifier_ids.entries);
if (trace->syscalls.table) {
for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
syscall__exit(&trace->syscalls.table[i]);
zfree(&trace->syscalls.table);
}
syscalltbl__delete(trace->sctbl);
zfree(&trace->perfconfig_events);
}
#ifdef HAVE_BPF_SKEL
static int bpf__setup_bpf_output(struct evlist *evlist)
{
int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
if (err)
pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
return err;
}
#endif
int cmd_trace(int argc, const char **argv)
{
const char *trace_usage[] = {
"perf trace [<options>] [<command>]",
"perf trace [<options>] -- <command> [<options>]",
"perf trace record [<options>] [<command>]",
"perf trace record [<options>] -- <command> [<options>]",
NULL
};
struct trace trace = {
.opts = {
.target = {
.uid = UINT_MAX,
.uses_mmap = true,
},
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.no_buffering = true,
.mmap_pages = UINT_MAX,
},
.output = stderr,
.show_comm = true,
.show_tstamp = true,
.show_duration = true,
.show_arg_names = true,
.args_alignment = 70,
.trace_syscalls = false,
.kernel_syscallchains = false,
.max_stack = UINT_MAX,
.max_events = ULONG_MAX,
};
const char *output_name = NULL;
const struct option trace_options[] = {
OPT_CALLBACK('e', "event", &trace, "event",
"event/syscall selector. use 'perf list' to list available events",
trace__parse_events_option),
OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
"event filter", parse_filter),
OPT_BOOLEAN(0, "comm", &trace.show_comm,
"show the thread COMM next to its id"),
OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
trace__parse_events_option),
OPT_STRING('o', "output", &output_name, "file", "output file name"),
OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
"trace events on existing process id"),
OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
"trace events on existing thread id"),
OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
"pids to filter (by the kernel)", trace__set_filter_pids_from_option),
OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
"child tasks do not inherit counters"),
OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
"number of mmap data pages", evlist__parse_mmap_pages),
OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
"user to profile"),
OPT_CALLBACK(0, "duration", &trace, "float",
"show only events with duration > N.M ms",
trace__set_duration),
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_BOOLEAN('T', "time", &trace.full_time,
"Show full timestamp, not time relative to first start"),
OPT_BOOLEAN(0, "failure", &trace.failure_only,
"Show only syscalls that failed"),
OPT_BOOLEAN('s', "summary", &trace.summary_only,
"Show only syscall summary with statistics"),
OPT_BOOLEAN('S', "with-summary", &trace.summary,
"Show all syscalls and summary with statistics"),
OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
"Show errno stats per syscall, use with -s or -S"),
OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
"Trace pagefaults", parse_pagefaults, "maj"),
OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
OPT_CALLBACK(0, "call-graph", &trace.opts,
"record_mode[,record_size]", record_callchain_help,
&record_parse_callchain_opt),
OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
"Use libtraceevent to print the tracepoint arguments."),
OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
"Show the kernel callchains on the syscall exit path"),
OPT_ULONG(0, "max-events", &trace.max_events,
"Set the maximum number of events to print, exit after that is reached. "),
OPT_UINTEGER(0, "min-stack", &trace.min_stack,
"Set the minimum stack depth when parsing the callchain, "
"anything below the specified depth will be ignored."),
OPT_UINTEGER(0, "max-stack", &trace.max_stack,
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
"Sort batch of events before processing, use if getting out of order events"),
OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
"print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
trace__parse_cgroups),
OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
"ms to wait before starting measurement after program "
"start"),
OPTS_EVSWITCH(&trace.evswitch),
OPT_END()
};
bool __maybe_unused max_stack_user_set = true;
bool mmap_pages_user_set = true;
struct evsel *evsel;
const char * const trace_subcommands[] = { "record", NULL };
int err = -1;
char bf[BUFSIZ];
struct sigaction sigchld_act;
signal(SIGSEGV, sighandler_dump_stack);
signal(SIGFPE, sighandler_dump_stack);
signal(SIGINT, sighandler_interrupt);
memset(&sigchld_act, 0, sizeof(sigchld_act));
sigchld_act.sa_flags = SA_SIGINFO;
sigchld_act.sa_sigaction = sighandler_chld;
sigaction(SIGCHLD, &sigchld_act, NULL);
trace.evlist = evlist__new();
trace.sctbl = syscalltbl__new();
if (trace.evlist == NULL || trace.sctbl == NULL) {
pr_err("Not enough memory to run!\n");
err = -ENOMEM;
goto out;
}
/*
* Parsing .perfconfig may entail creating a BPF event, that may need
* to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
* is too small. This affects just this process, not touching the
* global setting. If it fails we'll get something in 'perf trace -v'
* to help diagnose the problem.
*/
rlimit__bump_memlock();
err = perf_config(trace__config, &trace);
if (err)
goto out;
argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
/*
* Here we already passed thru trace__parse_events_option() and it has
* already figured out if -e syscall_name was used; if it wasn't, but
* --event foo:bar was, the user is interested _just_ in those, say,
* tracepoint events, not in the strace-like syscall-name-based mode.
*
* This is important because we need to check if strace-like mode is
* needed to decide if we should filter out the eBPF
* __augmented_syscalls__ code, if it is in the mix, say, via
* .perfconfig trace.add_events, and filter those out.
*/
if (!trace.trace_syscalls && !trace.trace_pgfaults &&
trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
trace.trace_syscalls = true;
}
/*
* Now that we have --verbose figured out, lets see if we need to parse
* events from .perfconfig, so that if those events fail parsing, say some
* BPF program fails, then we'll be able to use --verbose to see what went
* wrong in more detail.
*/
if (trace.perfconfig_events != NULL) {
struct parse_events_error parse_err;
parse_events_error__init(&parse_err);
err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
if (err)
parse_events_error__print(&parse_err, trace.perfconfig_events);
parse_events_error__exit(&parse_err);
if (err)
goto out;
}
if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
usage_with_options_msg(trace_usage, trace_options,
"cgroup monitoring only available in system-wide mode");
}
#ifdef HAVE_BPF_SKEL
if (!trace.trace_syscalls)
goto skip_augmentation;
trace.skel = augmented_raw_syscalls_bpf__open();
if (!trace.skel) {
pr_debug("Failed to open augmented syscalls BPF skeleton");
} else {
/*
* Disable attaching the BPF programs except for sys_enter and
* sys_exit that tail call into this as necessary.
*/
struct bpf_program *prog;
bpf_object__for_each_program(prog, trace.skel->obj) {
if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
bpf_program__set_autoattach(prog, /*autoattach=*/false);
}
err = augmented_raw_syscalls_bpf__load(trace.skel);
if (err < 0) {
libbpf_strerror(err, bf, sizeof(bf));
pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
} else {
augmented_raw_syscalls_bpf__attach(trace.skel);
trace__add_syscall_newtp(&trace);
}
}
err = bpf__setup_bpf_output(trace.evlist);
if (err) {
libbpf_strerror(err, bf, sizeof(bf));
pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
goto out;
}
trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
assert(!strcmp(evsel__name(trace.syscalls.events.bpf_output), "__augmented_syscalls__"));
skip_augmentation:
#endif
err = -1;
if (trace.trace_pgfaults) {
trace.opts.sample_address = true;
trace.opts.sample_time = true;
}
if (trace.opts.mmap_pages == UINT_MAX)
mmap_pages_user_set = false;
if (trace.max_stack == UINT_MAX) {
trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
max_stack_user_set = false;
}
#ifdef HAVE_DWARF_UNWIND_SUPPORT
if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
}
#endif
if (callchain_param.enabled) {
if (!mmap_pages_user_set && geteuid() == 0)
trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
symbol_conf.use_callchain = true;
}
if (trace.evlist->core.nr_entries > 0) {
evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
if (evlist__set_syscall_tp_fields(trace.evlist)) {
perror("failed to set syscalls:* tracepoint fields");
goto out;
}
}
if (trace.sort_events) {
ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
ordered_events__set_copy_on_queue(&trace.oe.data, true);
}
/*
* If we are augmenting syscalls, then combine what we put in the
* __augmented_syscalls__ BPF map with what is in the
* syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
* combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
*
* We'll switch to look at two BPF maps, one for sys_enter and the
* other for sys_exit when we start augmenting the sys_exit paths with
* buffers that are being copied from kernel to userspace, think 'read'
* syscall.
*/
if (trace.syscalls.events.bpf_output) {
evlist__for_each_entry(trace.evlist, evsel) {
bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
if (raw_syscalls_sys_exit) {
trace.raw_augmented_syscalls = true;
goto init_augmented_syscall_tp;
}
if (trace.syscalls.events.bpf_output->priv == NULL &&
strstr(evsel__name(evsel), "syscalls:sys_enter")) {
struct evsel *augmented = trace.syscalls.events.bpf_output;
if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
evsel__init_augmented_syscall_tp_args(augmented))
goto out;
/*
* Augmented is __augmented_syscalls__ BPF_OUTPUT event
* Above we made sure we can get from the payload the tp fields
* that we get from syscalls:sys_enter tracefs format file.
*/
augmented->handler = trace__sys_enter;
/*
* Now we do the same for the *syscalls:sys_enter event so that
* if we handle it directly, i.e. if the BPF prog returns 0 so
* as not to filter it, then we'll handle it just like we would
* for the BPF_OUTPUT one:
*/
if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
evsel__init_augmented_syscall_tp_args(evsel))
goto out;
evsel->handler = trace__sys_enter;
}
if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
struct syscall_tp *sc;
init_augmented_syscall_tp:
if (evsel__init_augmented_syscall_tp(evsel, evsel))
goto out;
sc = __evsel__syscall_tp(evsel);
/*
* For now with BPF raw_augmented we hook into
* raw_syscalls:sys_enter and there we get all
* 6 syscall args plus the tracepoint common
* fields and the syscall_nr (another long).
* So we check if that is the case and if so
* don't look after the sc->args_size but
* always after the full raw_syscalls:sys_enter
* payload, which is fixed.
*
* We'll revisit this later to pass
* s->args_size to the BPF augmenter (now
* tools/perf/examples/bpf/augmented_raw_syscalls.c,
* so that it copies only what we need for each
* syscall, like what happens when we use
* syscalls:sys_enter_NAME, so that we reduce
* the kernel/userspace traffic to just what is
* needed for each syscall.
*/
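/*
 * Illustrative arithmetic on a 64-bit target: (6 + 1) * 8 = 56
 * bytes for the syscall nr plus the six args, added to
 * sc->id.offset (the common tracepoint fields that precede them).
 */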
if (trace.raw_augmented_syscalls)
trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
evsel__init_augmented_syscall_tp_ret(evsel);
evsel->handler = trace__sys_exit;
}
}
}
if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
return trace__record(&trace, argc-1, &argv[1]);
/* Using just --errno-summary will trigger --summary */
if (trace.errno_summary && !trace.summary && !trace.summary_only)
trace.summary_only = true;
/* summary_only implies summary option, but don't overwrite summary if set */
if (trace.summary_only)
trace.summary = trace.summary_only;
if (output_name != NULL) {
err = trace__open_output(&trace, output_name);
if (err < 0) {
perror("failed to create output file");
goto out;
}
}
err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
if (err)
goto out_close;
err = target__validate(&trace.opts.target);
if (err) {
target__strerror(&trace.opts.target, err, bf, sizeof(bf));
fprintf(trace.output, "%s", bf);
goto out_close;
}
err = target__parse_uid(&trace.opts.target);
if (err) {
target__strerror(&trace.opts.target, err, bf, sizeof(bf));
fprintf(trace.output, "%s", bf);
goto out_close;
}
if (!argc && target__none(&trace.opts.target))
trace.opts.target.system_wide = true;
if (input_name)
err = trace__replay(&trace);
else
err = trace__run(&trace, argc, argv);
out_close:
if (output_name != NULL)
fclose(trace.output);
out:
trace__exit(&trace);
#ifdef HAVE_BPF_SKEL
augmented_raw_syscalls_bpf__destroy(trace.skel);
#endif
return err;
}
| linux-master | tools/perf/builtin-trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-timechart.c - make an svg timechart of system activity
*
* (C) Copyright 2009 Intel Corporation
*
* Authors:
* Arjan van de Ven <[email protected]>
*/
#include <errno.h>
#include <inttypes.h>
#include "builtin.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/evlist.h" // for struct evsel_str_handler
#include "util/evsel.h"
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include "util/symbol.h"
#include "util/thread.h"
#include "util/callchain.h"
#include "util/header.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"
#include "util/data.h"
#include "util/debug.h"
#include "util/string2.h"
#include "util/tracepoint.h"
#include "util/util.h"
#include <linux/err.h>
#include <traceevent/event-parse.h>
#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
FILE *open_memstream(char **ptr, size_t *sizeloc);
#endif
#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1
struct per_pid;
struct power_event;
struct wake_event;
struct timechart {
struct perf_tool tool;
struct per_pid *all_data;
struct power_event *power_events;
struct wake_event *wake_events;
int proc_num;
unsigned int numcpus;
u64 min_freq, /* Lowest CPU frequency seen */
max_freq, /* Highest CPU frequency seen */
turbo_frequency,
first_time, last_time;
bool power_only,
tasks_only,
with_backtrace,
topology;
bool force;
/* IO related settings */
bool io_only,
skip_eagain;
u64 io_events;
u64 min_time,
merge_dist;
};
struct per_pidcomm;
struct cpu_sample;
struct io_sample;
/*
* Data structure layout:
* We keep a list of "pid"s, matching the kernel's notion of a task struct.
* Each "pid" entry has a list of "comm"s.
* This is because we want to track different programs differently, while
* exec will reuse the original pid (by design).
* Each comm has a list of samples that will be used to draw the
* final graph.
*/
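/*
 * Rough sketch of that layout (illustrative only):
 *
 *	per_pid(pid 42) -> per_pidcomm("foo") -> cpu_sample list
 *	              \--> per_pidcomm("bar") -> cpu_sample list
 */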
struct per_pid {
struct per_pid *next;
int pid;
int ppid;
u64 start_time;
u64 end_time;
u64 total_time;
u64 total_bytes;
int display;
struct per_pidcomm *all;
struct per_pidcomm *current;
};
struct per_pidcomm {
struct per_pidcomm *next;
u64 start_time;
u64 end_time;
u64 total_time;
u64 max_bytes;
u64 total_bytes;
int Y;
int display;
long state;
u64 state_since;
char *comm;
struct cpu_sample *samples;
struct io_sample *io_samples;
};
struct sample_wrapper {
struct sample_wrapper *next;
u64 timestamp;
unsigned char data[];
};
#define TYPE_NONE 0
#define TYPE_RUNNING 1
#define TYPE_WAITING 2
#define TYPE_BLOCKED 3
struct cpu_sample {
struct cpu_sample *next;
u64 start_time;
u64 end_time;
int type;
int cpu;
const char *backtrace;
};
enum {
IOTYPE_READ,
IOTYPE_WRITE,
IOTYPE_SYNC,
IOTYPE_TX,
IOTYPE_RX,
IOTYPE_POLL,
};
struct io_sample {
struct io_sample *next;
u64 start_time;
u64 end_time;
u64 bytes;
int type;
int fd;
int err;
int merges;
};
#define CSTATE 1
#define PSTATE 2
struct power_event {
struct power_event *next;
int type;
int state;
u64 start_time;
u64 end_time;
int cpu;
};
struct wake_event {
struct wake_event *next;
int waker;
int wakee;
u64 time;
const char *backtrace;
};
struct process_filter {
char *name;
int pid;
struct process_filter *next;
};
static struct process_filter *process_filter;
static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
{
struct per_pid *cursor = tchart->all_data;
while (cursor) {
if (cursor->pid == pid)
return cursor;
cursor = cursor->next;
}
cursor = zalloc(sizeof(*cursor));
assert(cursor != NULL);
cursor->pid = pid;
cursor->next = tchart->all_data;
tchart->all_data = cursor;
return cursor;
}
static struct per_pidcomm *create_pidcomm(struct per_pid *p)
{
struct per_pidcomm *c;
c = zalloc(sizeof(*c));
if (!c)
return NULL;
p->current = c;
c->next = p->all;
p->all = c;
return c;
}
static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
{
struct per_pid *p;
struct per_pidcomm *c;
p = find_create_pid(tchart, pid);
c = p->all;
while (c) {
if (c->comm && strcmp(c->comm, comm) == 0) {
p->current = c;
return;
}
if (!c->comm) {
c->comm = strdup(comm);
p->current = c;
return;
}
c = c->next;
}
c = create_pidcomm(p);
assert(c != NULL);
c->comm = strdup(comm);
}
static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
{
struct per_pid *p, *pp;
p = find_create_pid(tchart, pid);
pp = find_create_pid(tchart, ppid);
p->ppid = ppid;
if (pp->current && pp->current->comm && !p->current)
pid_set_comm(tchart, pid, pp->current->comm);
p->start_time = timestamp;
if (p->current && !p->current->start_time) {
p->current->start_time = timestamp;
p->current->state_since = timestamp;
}
}
static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
{
struct per_pid *p;
p = find_create_pid(tchart, pid);
p->end_time = timestamp;
if (p->current)
p->current->end_time = timestamp;
}
static void pid_put_sample(struct timechart *tchart, int pid, int type,
unsigned int cpu, u64 start, u64 end,
const char *backtrace)
{
struct per_pid *p;
struct per_pidcomm *c;
struct cpu_sample *sample;
p = find_create_pid(tchart, pid);
c = p->current;
if (!c) {
c = create_pidcomm(p);
assert(c != NULL);
}
sample = zalloc(sizeof(*sample));
assert(sample != NULL);
sample->start_time = start;
sample->end_time = end;
sample->type = type;
sample->next = c->samples;
sample->cpu = cpu;
sample->backtrace = backtrace;
c->samples = sample;
if (sample->type == TYPE_RUNNING && end > start && start > 0) {
c->total_time += (end-start);
p->total_time += (end-start);
}
if (c->start_time == 0 || c->start_time > start)
c->start_time = start;
if (p->start_time == 0 || p->start_time > start)
p->start_time = start;
}
#define MAX_CPUS 4096
static u64 *cpus_cstate_start_times;
static int *cpus_cstate_state;
static u64 *cpus_pstate_start_times;
static u64 *cpus_pstate_state;
static int process_comm_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct timechart *tchart = container_of(tool, struct timechart, tool);
pid_set_comm(tchart, event->comm.tid, event->comm.comm);
return 0;
}
static int process_fork_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct timechart *tchart = container_of(tool, struct timechart, tool);
pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
return 0;
}
static int process_exit_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct timechart *tchart = container_of(tool, struct timechart, tool);
pid_exit(tchart, event->fork.pid, event->fork.time);
return 0;
}
#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
#endif
static void c_state_start(int cpu, u64 timestamp, int state)
{
cpus_cstate_start_times[cpu] = timestamp;
cpus_cstate_state[cpu] = state;
}
static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
{
struct power_event *pwr = zalloc(sizeof(*pwr));
if (!pwr)
return;
pwr->state = cpus_cstate_state[cpu];
pwr->start_time = cpus_cstate_start_times[cpu];
pwr->end_time = timestamp;
pwr->cpu = cpu;
pwr->type = CSTATE;
pwr->next = tchart->power_events;
tchart->power_events = pwr;
}
static struct power_event *p_state_end(struct timechart *tchart, int cpu,
u64 timestamp)
{
struct power_event *pwr = zalloc(sizeof(*pwr));
if (!pwr)
return NULL;
pwr->state = cpus_pstate_state[cpu];
pwr->start_time = cpus_pstate_start_times[cpu];
pwr->end_time = timestamp;
pwr->cpu = cpu;
pwr->type = PSTATE;
pwr->next = tchart->power_events;
if (!pwr->start_time)
pwr->start_time = tchart->first_time;
tchart->power_events = pwr;
return pwr;
}
static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
{
struct power_event *pwr;
if (new_freq > 8000000) /* detect invalid data */
return;
pwr = p_state_end(tchart, cpu, timestamp);
if (!pwr)
return;
cpus_pstate_state[cpu] = new_freq;
cpus_pstate_start_times[cpu] = timestamp;
if ((u64)new_freq > tchart->max_freq)
tchart->max_freq = new_freq;
if (new_freq < tchart->min_freq || tchart->min_freq == 0)
tchart->min_freq = new_freq;
if (new_freq == tchart->max_freq - 1000)
tchart->turbo_frequency = tchart->max_freq;
}
static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
int waker, int wakee, u8 flags, const char *backtrace)
{
struct per_pid *p;
struct wake_event *we = zalloc(sizeof(*we));
if (!we)
return;
we->time = timestamp;
we->waker = waker;
we->backtrace = backtrace;
if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
we->waker = -1;
we->wakee = wakee;
we->next = tchart->wake_events;
tchart->wake_events = we;
p = find_create_pid(tchart, we->wakee);
if (p && p->current && p->current->state == TYPE_NONE) {
p->current->state_since = timestamp;
p->current->state = TYPE_WAITING;
}
if (p && p->current && p->current->state == TYPE_BLOCKED) {
pid_put_sample(tchart, p->pid, p->current->state, cpu,
p->current->state_since, timestamp, NULL);
p->current->state_since = timestamp;
p->current->state = TYPE_WAITING;
}
}
static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
int prev_pid, int next_pid, u64 prev_state,
const char *backtrace)
{
struct per_pid *p = NULL, *prev_p;
prev_p = find_create_pid(tchart, prev_pid);
p = find_create_pid(tchart, next_pid);
if (prev_p->current && prev_p->current->state != TYPE_NONE)
pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
prev_p->current->state_since, timestamp,
backtrace);
if (p && p->current) {
if (p->current->state != TYPE_NONE)
pid_put_sample(tchart, next_pid, p->current->state, cpu,
p->current->state_since, timestamp,
backtrace);
p->current->state_since = timestamp;
p->current->state = TYPE_RUNNING;
}
if (prev_p->current) {
prev_p->current->state = TYPE_NONE;
prev_p->current->state_since = timestamp;
if (prev_state & 2)
prev_p->current->state = TYPE_BLOCKED;
if (prev_state == 0)
prev_p->current->state = TYPE_WAITING;
}
}
static const char *cat_backtrace(union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct addr_location al;
unsigned int i;
char *p = NULL;
size_t p_len;
u8 cpumode = PERF_RECORD_MISC_USER;
struct ip_callchain *chain = sample->callchain;
FILE *f = open_memstream(&p, &p_len);
if (!f) {
perror("open_memstream error");
return NULL;
}
addr_location__init(&al);
if (!chain)
goto exit;
if (machine__resolve(machine, &al, sample) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
goto exit;
}
for (i = 0; i < chain->nr; i++) {
u64 ip;
struct addr_location tal;
if (callchain_param.order == ORDER_CALLEE)
ip = chain->ips[i];
else
ip = chain->ips[chain->nr - i - 1];
if (ip >= PERF_CONTEXT_MAX) {
switch (ip) {
case PERF_CONTEXT_HV:
cpumode = PERF_RECORD_MISC_HYPERVISOR;
break;
case PERF_CONTEXT_KERNEL:
cpumode = PERF_RECORD_MISC_KERNEL;
break;
case PERF_CONTEXT_USER:
cpumode = PERF_RECORD_MISC_USER;
break;
default:
pr_debug("invalid callchain context: "
"%"PRId64"\n", (s64) ip);
/*
* It seems the callchain is corrupted.
* Discard all.
*/
zfree(&p);
goto exit;
}
continue;
}
addr_location__init(&tal);
tal.filtered = 0;
if (thread__find_symbol(al.thread, cpumode, ip, &tal))
fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name);
else
fprintf(f, "..... %016" PRIx64 "\n", ip);
addr_location__exit(&tal);
}
exit:
addr_location__exit(&al);
fclose(f);
return p;
}
typedef int (*tracepoint_handler)(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
const char *backtrace);
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct timechart *tchart = container_of(tool, struct timechart, tool);
if (evsel->core.attr.sample_type & PERF_SAMPLE_TIME) {
if (!tchart->first_time || tchart->first_time > sample->time)
tchart->first_time = sample->time;
if (tchart->last_time < sample->time)
tchart->last_time = sample->time;
}
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
return f(tchart, evsel, sample,
cat_backtrace(event, sample, machine));
}
return 0;
}
static int
process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
struct evsel *evsel,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
u32 state = evsel__intval(evsel, sample, "state");
u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
if (state == (u32)PWR_EVENT_EXIT)
c_state_end(tchart, cpu_id, sample->time);
else
c_state_start(cpu_id, sample->time, state);
return 0;
}
static int
process_sample_cpu_frequency(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
u32 state = evsel__intval(evsel, sample, "state");
u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
p_state_change(tchart, cpu_id, sample->time, state);
return 0;
}
static int
process_sample_sched_wakeup(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
const char *backtrace)
{
u8 flags = evsel__intval(evsel, sample, "common_flags");
int waker = evsel__intval(evsel, sample, "common_pid");
int wakee = evsel__intval(evsel, sample, "pid");
sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
return 0;
}
static int
process_sample_sched_switch(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
const char *backtrace)
{
int prev_pid = evsel__intval(evsel, sample, "prev_pid");
int next_pid = evsel__intval(evsel, sample, "next_pid");
u64 prev_state = evsel__intval(evsel, sample, "prev_state");
sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
prev_state, backtrace);
return 0;
}
#ifdef SUPPORT_OLD_POWER_EVENTS
static int
process_sample_power_start(struct timechart *tchart __maybe_unused,
struct evsel *evsel,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
u64 value = evsel__intval(evsel, sample, "value");
c_state_start(cpu_id, sample->time, value);
return 0;
}
static int
process_sample_power_end(struct timechart *tchart,
struct evsel *evsel __maybe_unused,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
c_state_end(tchart, sample->cpu, sample->time);
return 0;
}
static int
process_sample_power_frequency(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
u64 value = evsel__intval(evsel, sample, "value");
p_state_change(tchart, cpu_id, sample->time, value);
return 0;
}
#endif /* SUPPORT_OLD_POWER_EVENTS */
/*
* After the last sample we need to wrap up the current C/P state
* and close it out for each CPU.
*/
static void end_sample_processing(struct timechart *tchart)
{
u64 cpu;
struct power_event *pwr;
for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
/* C state */
#if 0
pwr = zalloc(sizeof(*pwr));
if (!pwr)
return;
pwr->state = cpus_cstate_state[cpu];
pwr->start_time = cpus_cstate_start_times[cpu];
pwr->end_time = tchart->last_time;
pwr->cpu = cpu;
pwr->type = CSTATE;
pwr->next = tchart->power_events;
tchart->power_events = pwr;
#endif
/* P state */
pwr = p_state_end(tchart, cpu, tchart->last_time);
if (!pwr)
return;
if (!pwr->state)
pwr->state = tchart->min_freq;
}
}
static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
u64 start, int fd)
{
struct per_pid *p = find_create_pid(tchart, pid);
struct per_pidcomm *c = p->current;
struct io_sample *sample;
struct io_sample *prev;
if (!c) {
c = create_pidcomm(p);
if (!c)
return -ENOMEM;
}
prev = c->io_samples;
if (prev && prev->start_time && !prev->end_time) {
pr_warning("Skip invalid start event: "
"previous event already started!\n");
/* remove previous event that has been started,
* we are not sure we will ever get an end for it */
c->io_samples = prev->next;
free(prev);
return 0;
}
sample = zalloc(sizeof(*sample));
if (!sample)
return -ENOMEM;
sample->start_time = start;
sample->type = type;
sample->fd = fd;
sample->next = c->io_samples;
c->io_samples = sample;
if (c->start_time == 0 || c->start_time > start)
c->start_time = start;
return 0;
}
static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
u64 end, long ret)
{
struct per_pid *p = find_create_pid(tchart, pid);
struct per_pidcomm *c = p->current;
struct io_sample *sample, *prev;
if (!c) {
pr_warning("Invalid pidcomm!\n");
return -1;
}
sample = c->io_samples;
if (!sample) /* skip partially captured events */
return 0;
if (sample->end_time) {
pr_warning("Skip invalid end event: "
"previous event already ended!\n");
return 0;
}
if (sample->type != type) {
pr_warning("Skip invalid end event: invalid event type!\n");
return 0;
}
sample->end_time = end;
prev = sample->next;
/* we want to be able to see small and fast transfers, so make them
* at least min_time long, but don't overlap them */
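/*
 * Illustrative stretch: with a min_time of 1ms, a 10us read is widened
 * to 1ms on the chart; the overlap checks below then trim either this
 * or the previous sample so the boxes stay disjoint.
 */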
if (sample->end_time - sample->start_time < tchart->min_time)
sample->end_time = sample->start_time + tchart->min_time;
if (prev && sample->start_time < prev->end_time) {
if (prev->err) /* try to make errors more visible */
sample->start_time = prev->end_time;
else
prev->end_time = sample->start_time;
}
if (ret < 0) {
sample->err = ret;
} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
type == IOTYPE_TX || type == IOTYPE_RX) {
if ((u64)ret > c->max_bytes)
c->max_bytes = ret;
c->total_bytes += ret;
p->total_bytes += ret;
sample->bytes = ret;
}
/* merge two requests to make svg smaller and render-friendly */
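/*
 * Illustrative merge: two successful 4096-byte reads on the same fd,
 * ending within merge_dist of each other, collapse into one 8192-byte
 * sample with merges == 1, drawn as a single box.
 */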
if (prev &&
prev->type == sample->type &&
prev->err == sample->err &&
prev->fd == sample->fd &&
prev->end_time + tchart->merge_dist >= sample->start_time) {
sample->bytes += prev->bytes;
sample->merges += prev->merges + 1;
sample->start_time = prev->start_time;
sample->next = prev->next;
free(prev);
if (!sample->err && sample->bytes > c->max_bytes)
c->max_bytes = sample->bytes;
}
tchart->io_events++;
return 0;
}
static int
process_enter_read(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
sample->time, fd);
}
static int
process_exit_read(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
sample->time, ret);
}
static int
process_enter_write(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
sample->time, fd);
}
static int
process_exit_write(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
sample->time, ret);
}
static int
process_enter_sync(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
sample->time, fd);
}
static int
process_exit_sync(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
sample->time, ret);
}
static int
process_enter_tx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
sample->time, fd);
}
static int
process_exit_tx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
sample->time, ret);
}
static int
process_enter_rx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
sample->time, fd);
}
static int
process_exit_rx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
sample->time, ret);
}
static int
process_enter_poll(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
sample->time, fd);
}
static int
process_exit_poll(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
sample->time, ret);
}
/*
* Sort the pid data structure
*/
static void sort_pids(struct timechart *tchart)
{
struct per_pid *new_list, *p, *cursor, *prev;
/* sort by ppid first, then by pid, lowest to highest */
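/*
 * Illustrative ordering: (ppid,pid) pairs (1,300), (1,200), (0,400)
 * come out as (0,400), (1,200), (1,300).
 */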
new_list = NULL;
while (tchart->all_data) {
p = tchart->all_data;
tchart->all_data = p->next;
p->next = NULL;
if (new_list == NULL) {
new_list = p;
p->next = NULL;
continue;
}
prev = NULL;
cursor = new_list;
while (cursor) {
if (cursor->ppid > p->ppid ||
(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
/* must insert before */
if (prev) {
p->next = prev->next;
prev->next = p;
cursor = NULL;
continue;
} else {
p->next = new_list;
new_list = p;
cursor = NULL;
continue;
}
}
prev = cursor;
cursor = cursor->next;
if (!cursor)
prev->next = p;
}
}
tchart->all_data = new_list;
}
static void draw_c_p_states(struct timechart *tchart)
{
struct power_event *pwr;
pwr = tchart->power_events;
/*
* two pass drawing so that the P state bars are on top of the C state blocks
*/
while (pwr) {
if (pwr->type == CSTATE)
svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
pwr = pwr->next;
}
pwr = tchart->power_events;
while (pwr) {
if (pwr->type == PSTATE) {
if (!pwr->state)
pwr->state = tchart->min_freq;
svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
}
pwr = pwr->next;
}
}
static void draw_wakeups(struct timechart *tchart)
{
struct wake_event *we;
struct per_pid *p;
struct per_pidcomm *c;
we = tchart->wake_events;
while (we) {
int from = 0, to = 0;
char *task_from = NULL, *task_to = NULL;
/* locate the column of the waker and wakee */
p = tchart->all_data;
while (p) {
if (p->pid == we->waker || p->pid == we->wakee) {
c = p->all;
while (c) {
if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
if (p->pid == we->waker && !from) {
from = c->Y;
task_from = strdup(c->comm);
}
if (p->pid == we->wakee && !to) {
to = c->Y;
task_to = strdup(c->comm);
}
}
c = c->next;
}
c = p->all;
while (c) {
if (p->pid == we->waker && !from) {
from = c->Y;
task_from = strdup(c->comm);
}
if (p->pid == we->wakee && !to) {
to = c->Y;
task_to = strdup(c->comm);
}
c = c->next;
}
}
p = p->next;
}
if (!task_from) {
task_from = malloc(40);
sprintf(task_from, "[%i]", we->waker);
}
if (!task_to) {
task_to = malloc(40);
sprintf(task_to, "[%i]", we->wakee);
}
if (we->waker == -1)
svg_interrupt(we->time, to, we->backtrace);
else if (from && to && abs(from - to) == 1)
svg_wakeline(we->time, from, to, we->backtrace);
else
svg_partial_wakeline(we->time, from, task_from, to,
task_to, we->backtrace);
we = we->next;
free(task_from);
free(task_to);
}
}
static void draw_cpu_usage(struct timechart *tchart)
{
struct per_pid *p;
struct per_pidcomm *c;
struct cpu_sample *sample;
p = tchart->all_data;
while (p) {
c = p->all;
while (c) {
sample = c->samples;
while (sample) {
if (sample->type == TYPE_RUNNING) {
svg_process(sample->cpu,
sample->start_time,
sample->end_time,
p->pid,
c->comm,
sample->backtrace);
}
sample = sample->next;
}
c = c->next;
}
p = p->next;
}
}
static void draw_io_bars(struct timechart *tchart)
{
const char *suf;
double bytes;
char comm[256];
struct per_pid *p;
struct per_pidcomm *c;
struct io_sample *sample;
int Y = 1;
p = tchart->all_data;
while (p) {
c = p->all;
while (c) {
if (!c->display) {
c->Y = 0;
c = c->next;
continue;
}
svg_box(Y, c->start_time, c->end_time, "process3");
			for (sample = c->io_samples; sample; sample = sample->next) {
double h = (double)sample->bytes / c->max_bytes;
if (tchart->skip_eagain &&
sample->err == -EAGAIN)
continue;
if (sample->err)
h = 1;
if (sample->type == IOTYPE_SYNC)
svg_fbox(Y,
sample->start_time,
sample->end_time,
1,
sample->err ? "error" : "sync",
sample->fd,
sample->err,
sample->merges);
else if (sample->type == IOTYPE_POLL)
svg_fbox(Y,
sample->start_time,
sample->end_time,
1,
sample->err ? "error" : "poll",
sample->fd,
sample->err,
sample->merges);
else if (sample->type == IOTYPE_READ)
svg_ubox(Y,
sample->start_time,
sample->end_time,
h,
sample->err ? "error" : "disk",
sample->fd,
sample->err,
sample->merges);
else if (sample->type == IOTYPE_WRITE)
svg_lbox(Y,
sample->start_time,
sample->end_time,
h,
sample->err ? "error" : "disk",
sample->fd,
sample->err,
sample->merges);
else if (sample->type == IOTYPE_RX)
svg_ubox(Y,
sample->start_time,
sample->end_time,
h,
sample->err ? "error" : "net",
sample->fd,
sample->err,
sample->merges);
else if (sample->type == IOTYPE_TX)
svg_lbox(Y,
sample->start_time,
sample->end_time,
h,
sample->err ? "error" : "net",
sample->fd,
sample->err,
sample->merges);
}
suf = "";
bytes = c->total_bytes;
if (bytes > 1024) {
bytes = bytes / 1024;
suf = "K";
}
if (bytes > 1024) {
bytes = bytes / 1024;
suf = "M";
}
if (bytes > 1024) {
bytes = bytes / 1024;
suf = "G";
}
sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
svg_text(Y, c->start_time, comm);
c->Y = Y;
Y++;
c = c->next;
}
p = p->next;
}
}
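/*
 * Draw the per-process rows below the CPU area (which occupies
 * 2 * numcpus + 2 rows): one row per displayed comm, showing its
 * running, blocked and waiting samples plus a label with the total
 * runtime.
 */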
static void draw_process_bars(struct timechart *tchart)
{
struct per_pid *p;
struct per_pidcomm *c;
struct cpu_sample *sample;
	int Y = 2 * tchart->numcpus + 2;
p = tchart->all_data;
while (p) {
c = p->all;
while (c) {
if (!c->display) {
c->Y = 0;
c = c->next;
continue;
}
svg_box(Y, c->start_time, c->end_time, "process");
sample = c->samples;
while (sample) {
if (sample->type == TYPE_RUNNING)
svg_running(Y, sample->cpu,
sample->start_time,
sample->end_time,
sample->backtrace);
if (sample->type == TYPE_BLOCKED)
svg_blocked(Y, sample->cpu,
sample->start_time,
sample->end_time,
sample->backtrace);
if (sample->type == TYPE_WAITING)
svg_waiting(Y, sample->cpu,
sample->start_time,
sample->end_time,
sample->backtrace);
sample = sample->next;
}
if (c->comm) {
char comm[256];
if (c->total_time > 5000000000) /* 5 seconds */
sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
else
sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);
svg_text(Y, c->start_time, comm);
}
c->Y = Y;
Y++;
c = c->next;
}
p = p->next;
}
}
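/*
 * Remember a -p/--process selector: the string is kept for comm
 * matching and also parsed as a pid, so either form matches.
 */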
static void add_process_filter(const char *string)
{
int pid = strtoull(string, NULL, 10);
struct process_filter *filt = malloc(sizeof(*filt));
if (!filt)
return;
filt->name = strdup(string);
filt->pid = pid;
filt->next = process_filter;
process_filter = filt;
}
static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
struct process_filter *filt;
if (!process_filter)
return 1;
filt = process_filter;
while (filt) {
if (filt->pid && p->pid == filt->pid)
return 1;
if (strcmp(filt->name, c->comm) == 0)
return 1;
filt = filt->next;
}
return 0;
}
static int determine_display_tasks_filtered(struct timechart *tchart)
{
struct per_pid *p;
struct per_pidcomm *c;
int count = 0;
p = tchart->all_data;
while (p) {
p->display = 0;
if (p->start_time == 1)
p->start_time = tchart->first_time;
/* no exit marker, task kept running to the end */
if (p->end_time == 0)
p->end_time = tchart->last_time;
c = p->all;
while (c) {
c->display = 0;
if (c->start_time == 1)
c->start_time = tchart->first_time;
if (passes_filter(p, c)) {
c->display = 1;
p->display = 1;
count++;
}
if (c->end_time == 0)
c->end_time = tchart->last_time;
c = c->next;
}
p = p->next;
}
return count;
}
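/*
 * Mark for display every task and comm whose total runtime is at least
 * 'threshold' ns, fixing up missing start/end times along the way.
 * Returns the number of comms that will be shown.
 */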
static int determine_display_tasks(struct timechart *tchart, u64 threshold)
{
struct per_pid *p;
struct per_pidcomm *c;
int count = 0;
p = tchart->all_data;
while (p) {
p->display = 0;
if (p->start_time == 1)
p->start_time = tchart->first_time;
/* no exit marker, task kept running to the end */
if (p->end_time == 0)
p->end_time = tchart->last_time;
if (p->total_time >= threshold)
p->display = 1;
c = p->all;
while (c) {
c->display = 0;
if (c->start_time == 1)
c->start_time = tchart->first_time;
if (c->total_time >= threshold) {
c->display = 1;
count++;
}
if (c->end_time == 0)
c->end_time = tchart->last_time;
c = c->next;
}
p = p->next;
}
return count;
}
static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
{
struct per_pid *p;
struct per_pidcomm *c;
int count = 0;
p = timechart->all_data;
while (p) {
/* no exit marker, task kept running to the end */
if (p->end_time == 0)
p->end_time = timechart->last_time;
c = p->all;
while (c) {
c->display = 0;
if (c->total_bytes >= threshold) {
c->display = 1;
count++;
}
if (c->end_time == 0)
c->end_time = timechart->last_time;
c = c->next;
}
p = p->next;
}
return count;
}
#define BYTES_THRESH (1 * 1024 * 1024)
#define TIME_THRESH 10000000
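/*
 * Pick the tasks to show, lowering the threshold by 10x until at least
 * proc_num of them qualify, then emit the SVG: either the IO view or
 * the CPU/C-state/P-state view with process bars and wakeups.
 */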
static void write_svg_file(struct timechart *tchart, const char *filename)
{
u64 i;
int count;
int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
if (tchart->power_only)
tchart->proc_num = 0;
	/*
	 * We'd like to show at least proc_num tasks;
	 * be less picky if we have fewer.
	 */
do {
if (process_filter)
count = determine_display_tasks_filtered(tchart);
else if (tchart->io_events)
count = determine_display_io_tasks(tchart, thresh);
else
count = determine_display_tasks(tchart, thresh);
thresh /= 10;
} while (!process_filter && thresh && count < tchart->proc_num);
if (!tchart->proc_num)
count = 0;
if (tchart->io_events) {
open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
svg_time_grid(0.5);
svg_io_legenda();
draw_io_bars(tchart);
} else {
open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
svg_time_grid(0);
svg_legenda();
for (i = 0; i < tchart->numcpus; i++)
svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
draw_cpu_usage(tchart);
if (tchart->proc_num)
draw_process_bars(tchart);
if (!tchart->tasks_only)
draw_c_p_states(tchart);
if (tchart->proc_num)
draw_wakeups(tchart);
}
svg_close();
}
static int process_header(struct perf_file_section *section __maybe_unused,
struct perf_header *ph,
int feat,
int fd __maybe_unused,
void *data)
{
struct timechart *tchart = data;
switch (feat) {
case HEADER_NRCPUS:
tchart->numcpus = ph->env.nr_cpus_avail;
break;
case HEADER_CPU_TOPOLOGY:
if (!tchart->topology)
break;
if (svg_build_topology_map(&ph->env))
fprintf(stderr, "problem building topology\n");
break;
default:
break;
}
return 0;
}
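/*
 * Analysis entry point: open the perf.data session, pick up the CPU
 * count and topology from the header, run the tracepoint handlers over
 * all events, then sort the pids and write the resulting SVG.
 */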
static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{
const struct evsel_str_handler power_tracepoints[] = {
{ "power:cpu_idle", process_sample_cpu_idle },
{ "power:cpu_frequency", process_sample_cpu_frequency },
{ "sched:sched_wakeup", process_sample_sched_wakeup },
{ "sched:sched_switch", process_sample_sched_switch },
#ifdef SUPPORT_OLD_POWER_EVENTS
{ "power:power_start", process_sample_power_start },
{ "power:power_end", process_sample_power_end },
{ "power:power_frequency", process_sample_power_frequency },
#endif
{ "syscalls:sys_enter_read", process_enter_read },
{ "syscalls:sys_enter_pread64", process_enter_read },
{ "syscalls:sys_enter_readv", process_enter_read },
{ "syscalls:sys_enter_preadv", process_enter_read },
{ "syscalls:sys_enter_write", process_enter_write },
{ "syscalls:sys_enter_pwrite64", process_enter_write },
{ "syscalls:sys_enter_writev", process_enter_write },
{ "syscalls:sys_enter_pwritev", process_enter_write },
{ "syscalls:sys_enter_sync", process_enter_sync },
{ "syscalls:sys_enter_sync_file_range", process_enter_sync },
{ "syscalls:sys_enter_fsync", process_enter_sync },
{ "syscalls:sys_enter_msync", process_enter_sync },
{ "syscalls:sys_enter_recvfrom", process_enter_rx },
{ "syscalls:sys_enter_recvmmsg", process_enter_rx },
{ "syscalls:sys_enter_recvmsg", process_enter_rx },
{ "syscalls:sys_enter_sendto", process_enter_tx },
{ "syscalls:sys_enter_sendmsg", process_enter_tx },
{ "syscalls:sys_enter_sendmmsg", process_enter_tx },
{ "syscalls:sys_enter_epoll_pwait", process_enter_poll },
{ "syscalls:sys_enter_epoll_wait", process_enter_poll },
{ "syscalls:sys_enter_poll", process_enter_poll },
{ "syscalls:sys_enter_ppoll", process_enter_poll },
{ "syscalls:sys_enter_pselect6", process_enter_poll },
{ "syscalls:sys_enter_select", process_enter_poll },
{ "syscalls:sys_exit_read", process_exit_read },
{ "syscalls:sys_exit_pread64", process_exit_read },
{ "syscalls:sys_exit_readv", process_exit_read },
{ "syscalls:sys_exit_preadv", process_exit_read },
{ "syscalls:sys_exit_write", process_exit_write },
{ "syscalls:sys_exit_pwrite64", process_exit_write },
{ "syscalls:sys_exit_writev", process_exit_write },
{ "syscalls:sys_exit_pwritev", process_exit_write },
{ "syscalls:sys_exit_sync", process_exit_sync },
{ "syscalls:sys_exit_sync_file_range", process_exit_sync },
{ "syscalls:sys_exit_fsync", process_exit_sync },
{ "syscalls:sys_exit_msync", process_exit_sync },
{ "syscalls:sys_exit_recvfrom", process_exit_rx },
{ "syscalls:sys_exit_recvmmsg", process_exit_rx },
{ "syscalls:sys_exit_recvmsg", process_exit_rx },
{ "syscalls:sys_exit_sendto", process_exit_tx },
{ "syscalls:sys_exit_sendmsg", process_exit_tx },
{ "syscalls:sys_exit_sendmmsg", process_exit_tx },
{ "syscalls:sys_exit_epoll_pwait", process_exit_poll },
{ "syscalls:sys_exit_epoll_wait", process_exit_poll },
{ "syscalls:sys_exit_poll", process_exit_poll },
{ "syscalls:sys_exit_ppoll", process_exit_poll },
{ "syscalls:sys_exit_pselect6", process_exit_poll },
{ "syscalls:sys_exit_select", process_exit_poll },
};
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = tchart->force,
};
struct perf_session *session = perf_session__new(&data, &tchart->tool);
int ret = -EINVAL;
if (IS_ERR(session))
return PTR_ERR(session);
symbol__init(&session->header.env);
(void)perf_header__process_sections(&session->header,
perf_data__fd(session->data),
tchart,
process_header);
if (!perf_session__has_traces(session, "timechart record"))
goto out_delete;
if (perf_session__set_tracepoints_handlers(session,
power_tracepoints)) {
pr_err("Initializing session tracepoint handlers failed\n");
goto out_delete;
}
ret = perf_session__process_events(session);
if (ret)
goto out_delete;
end_sample_processing(tchart);
sort_pids(tchart);
write_svg_file(tchart, output_name);
pr_info("Written %2.1f seconds of trace to %s.\n",
(tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
out_delete:
perf_session__delete(session);
return ret;
}
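/*
 * Build a 'perf record' command line for the IO tracepoints. Each event
 * contributes four arguments ("-e", name, "--filter", expr), so rec_argc
 * is reduced by 4 for every tracepoint this kernel doesn't provide. Our
 * own pid is filtered out to keep the recorder's IO out of the chart.
 */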
static int timechart__io_record(int argc, const char **argv)
{
unsigned int rec_argc, i;
const char **rec_argv;
const char **p;
char *filter = NULL;
const char * const common_args[] = {
"record", "-a", "-R", "-c", "1",
};
unsigned int common_args_nr = ARRAY_SIZE(common_args);
const char * const disk_events[] = {
"syscalls:sys_enter_read",
"syscalls:sys_enter_pread64",
"syscalls:sys_enter_readv",
"syscalls:sys_enter_preadv",
"syscalls:sys_enter_write",
"syscalls:sys_enter_pwrite64",
"syscalls:sys_enter_writev",
"syscalls:sys_enter_pwritev",
"syscalls:sys_enter_sync",
"syscalls:sys_enter_sync_file_range",
"syscalls:sys_enter_fsync",
"syscalls:sys_enter_msync",
"syscalls:sys_exit_read",
"syscalls:sys_exit_pread64",
"syscalls:sys_exit_readv",
"syscalls:sys_exit_preadv",
"syscalls:sys_exit_write",
"syscalls:sys_exit_pwrite64",
"syscalls:sys_exit_writev",
"syscalls:sys_exit_pwritev",
"syscalls:sys_exit_sync",
"syscalls:sys_exit_sync_file_range",
"syscalls:sys_exit_fsync",
"syscalls:sys_exit_msync",
};
unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
const char * const net_events[] = {
"syscalls:sys_enter_recvfrom",
"syscalls:sys_enter_recvmmsg",
"syscalls:sys_enter_recvmsg",
"syscalls:sys_enter_sendto",
"syscalls:sys_enter_sendmsg",
"syscalls:sys_enter_sendmmsg",
"syscalls:sys_exit_recvfrom",
"syscalls:sys_exit_recvmmsg",
"syscalls:sys_exit_recvmsg",
"syscalls:sys_exit_sendto",
"syscalls:sys_exit_sendmsg",
"syscalls:sys_exit_sendmmsg",
};
unsigned int net_events_nr = ARRAY_SIZE(net_events);
const char * const poll_events[] = {
"syscalls:sys_enter_epoll_pwait",
"syscalls:sys_enter_epoll_wait",
"syscalls:sys_enter_poll",
"syscalls:sys_enter_ppoll",
"syscalls:sys_enter_pselect6",
"syscalls:sys_enter_select",
"syscalls:sys_exit_epoll_pwait",
"syscalls:sys_exit_epoll_wait",
"syscalls:sys_exit_poll",
"syscalls:sys_exit_ppoll",
"syscalls:sys_exit_pselect6",
"syscalls:sys_exit_select",
};
unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
rec_argc = common_args_nr +
disk_events_nr * 4 +
net_events_nr * 4 +
poll_events_nr * 4 +
argc;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
free(rec_argv);
return -ENOMEM;
}
p = rec_argv;
for (i = 0; i < common_args_nr; i++)
*p++ = strdup(common_args[i]);
for (i = 0; i < disk_events_nr; i++) {
if (!is_valid_tracepoint(disk_events[i])) {
rec_argc -= 4;
continue;
}
*p++ = "-e";
*p++ = strdup(disk_events[i]);
*p++ = "--filter";
*p++ = filter;
}
for (i = 0; i < net_events_nr; i++) {
if (!is_valid_tracepoint(net_events[i])) {
rec_argc -= 4;
continue;
}
*p++ = "-e";
*p++ = strdup(net_events[i]);
*p++ = "--filter";
*p++ = filter;
}
for (i = 0; i < poll_events_nr; i++) {
if (!is_valid_tracepoint(poll_events[i])) {
rec_argc -= 4;
continue;
}
*p++ = "-e";
*p++ = strdup(poll_events[i]);
*p++ = "--filter";
*p++ = filter;
}
for (i = 0; i < (unsigned int)argc; i++)
*p++ = argv[i];
return cmd_record(rec_argc, rec_argv);
}
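/*
 * Build a 'perf record' command line for the power and scheduler
 * tracepoints, honoring -P/-T and optional backtraces, and falling back
 * to the old power:power_* events on kernels without power:cpu_idle.
 */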
static int timechart__record(struct timechart *tchart, int argc, const char **argv)
{
unsigned int rec_argc, i, j;
const char **rec_argv;
const char **p;
unsigned int record_elems;
const char * const common_args[] = {
"record", "-a", "-R", "-c", "1",
};
unsigned int common_args_nr = ARRAY_SIZE(common_args);
const char * const backtrace_args[] = {
"-g",
};
unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
const char * const power_args[] = {
"-e", "power:cpu_frequency",
"-e", "power:cpu_idle",
};
unsigned int power_args_nr = ARRAY_SIZE(power_args);
const char * const old_power_args[] = {
#ifdef SUPPORT_OLD_POWER_EVENTS
"-e", "power:power_start",
"-e", "power:power_end",
"-e", "power:power_frequency",
#endif
};
unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
const char * const tasks_args[] = {
"-e", "sched:sched_wakeup",
"-e", "sched:sched_switch",
};
unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
#ifdef SUPPORT_OLD_POWER_EVENTS
if (!is_valid_tracepoint("power:cpu_idle") &&
is_valid_tracepoint("power:power_start")) {
use_old_power_events = 1;
power_args_nr = 0;
} else {
old_power_args_nr = 0;
}
#endif
if (tchart->power_only)
tasks_args_nr = 0;
if (tchart->tasks_only) {
power_args_nr = 0;
old_power_args_nr = 0;
}
if (!tchart->with_backtrace)
backtrace_args_no = 0;
record_elems = common_args_nr + tasks_args_nr +
power_args_nr + old_power_args_nr + backtrace_args_no;
rec_argc = record_elems + argc;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
p = rec_argv;
for (i = 0; i < common_args_nr; i++)
*p++ = strdup(common_args[i]);
for (i = 0; i < backtrace_args_no; i++)
*p++ = strdup(backtrace_args[i]);
for (i = 0; i < tasks_args_nr; i++)
*p++ = strdup(tasks_args[i]);
for (i = 0; i < power_args_nr; i++)
*p++ = strdup(power_args[i]);
for (i = 0; i < old_power_args_nr; i++)
*p++ = strdup(old_power_args[i]);
for (j = 0; j < (unsigned int)argc; j++)
*p++ = argv[j];
return cmd_record(rec_argc, rec_argv);
}
static int
parse_process(const struct option *opt __maybe_unused, const char *arg,
int __maybe_unused unset)
{
if (arg)
add_process_filter(arg);
return 0;
}
static int
parse_highlight(const struct option *opt __maybe_unused, const char *arg,
int __maybe_unused unset)
{
unsigned long duration = strtoul(arg, NULL, 0);
if (svg_highlight || svg_highlight_name)
return -1;
if (duration)
svg_highlight = duration;
else
svg_highlight_name = strdup(arg);
return 0;
}
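/*
 * Parse a duration such as "500us" or "10ms"; the value is stored in
 * nanoseconds, with 'n' (the default), 'u' and 'm' suffixes accepted.
 */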
static int
parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
{
char unit = 'n';
u64 *value = opt->value;
if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
switch (unit) {
case 'm':
*value *= NSEC_PER_MSEC;
break;
case 'u':
*value *= NSEC_PER_USEC;
break;
case 'n':
break;
default:
return -1;
}
}
return 0;
}
int cmd_timechart(int argc, const char **argv)
{
struct timechart tchart = {
.tool = {
.comm = process_comm_event,
.fork = process_fork_event,
.exit = process_exit_event,
.sample = process_sample_event,
.ordered_events = true,
},
.proc_num = 15,
.min_time = NSEC_PER_MSEC,
.merge_dist = 1000,
};
const char *output_name = "output.svg";
const struct option timechart_common_options[] = {
OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, "output processes data only"),
OPT_END()
};
const struct option timechart_options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_STRING('o', "output", &output_name, "file", "output file name"),
OPT_INTEGER('w', "width", &svg_page_width, "page width"),
OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
"highlight tasks. Pass duration in ns or process name.",
parse_highlight),
OPT_CALLBACK('p', "process", NULL, "process",
"process selector. Pass a pid or process name.",
parse_process),
OPT_CALLBACK(0, "symfs", NULL, "directory",
"Look for files with symbols relative to this directory",
symbol__config_symfs),
OPT_INTEGER('n', "proc-num", &tchart.proc_num,
"min. number of tasks to print"),
OPT_BOOLEAN('t', "topology", &tchart.topology,
"sort CPUs according to topology"),
OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
"skip EAGAIN errors"),
OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
"all IO faster than min-time will visually appear longer",
parse_time),
OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
"merge events that are merge-dist us apart",
parse_time),
OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
OPT_PARENT(timechart_common_options),
};
const char * const timechart_subcommands[] = { "record", NULL };
const char *timechart_usage[] = {
"perf timechart [<options>] {record}",
NULL
};
const struct option timechart_record_options[] = {
OPT_BOOLEAN('I', "io-only", &tchart.io_only,
"record only IO data"),
OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
OPT_PARENT(timechart_common_options),
};
const char * const timechart_record_usage[] = {
"perf timechart record [<options>]",
NULL
};
int ret;
cpus_cstate_start_times = calloc(MAX_CPUS, sizeof(*cpus_cstate_start_times));
if (!cpus_cstate_start_times)
return -ENOMEM;
cpus_cstate_state = calloc(MAX_CPUS, sizeof(*cpus_cstate_state));
if (!cpus_cstate_state) {
ret = -ENOMEM;
goto out;
}
cpus_pstate_start_times = calloc(MAX_CPUS, sizeof(*cpus_pstate_start_times));
if (!cpus_pstate_start_times) {
ret = -ENOMEM;
goto out;
}
cpus_pstate_state = calloc(MAX_CPUS, sizeof(*cpus_pstate_state));
if (!cpus_pstate_state) {
ret = -ENOMEM;
goto out;
}
argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (tchart.power_only && tchart.tasks_only) {
pr_err("-P and -T options cannot be used at the same time.\n");
ret = -1;
goto out;
}
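	/* Accept abbreviations of "record" that are at least three characters. */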
if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
argc = parse_options(argc, argv, timechart_record_options,
timechart_record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (tchart.power_only && tchart.tasks_only) {
pr_err("-P and -T options cannot be used at the same time.\n");
ret = -1;
goto out;
}
if (tchart.io_only)
ret = timechart__io_record(argc, argv);
else
ret = timechart__record(&tchart, argc, argv);
goto out;
} else if (argc)
usage_with_options(timechart_usage, timechart_options);
setup_pager();
ret = __cmd_timechart(&tchart, output_name);
out:
zfree(&cpus_cstate_start_times);
zfree(&cpus_cstate_state);
zfree(&cpus_pstate_start_times);
zfree(&cpus_pstate_state);
return ret;
}
| linux-master | tools/perf/builtin-timechart.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-diff.c
*
* Builtin diff command: Analyze two perf.data input files, look up and read
* DSOs and symbol information, sort them and produce a diff.
*/
#include "builtin.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/hist.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/sort.h"
#include "util/srcline.h"
#include "util/symbol.h"
#include "util/data.h"
#include "util/config.h"
#include "util/time-utils.h"
#include "util/annotate.h"
#include "util/map.h"
#include "util/spark.h"
#include "util/block-info.h"
#include "util/stream.h"
#include "util/util.h"
#include <linux/err.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <math.h>
struct perf_diff {
struct perf_tool tool;
const char *time_str;
struct perf_time_interval *ptime_range;
int range_size;
int range_num;
bool has_br_stack;
bool stream;
};
/* Diff command specific HPP columns. */
enum {
PERF_HPP_DIFF__BASELINE,
PERF_HPP_DIFF__PERIOD,
PERF_HPP_DIFF__PERIOD_BASELINE,
PERF_HPP_DIFF__DELTA,
PERF_HPP_DIFF__RATIO,
PERF_HPP_DIFF__WEIGHTED_DIFF,
PERF_HPP_DIFF__FORMULA,
PERF_HPP_DIFF__DELTA_ABS,
PERF_HPP_DIFF__CYCLES,
PERF_HPP_DIFF__CYCLES_HIST,
PERF_HPP_DIFF__MAX_INDEX
};
struct diff_hpp_fmt {
struct perf_hpp_fmt fmt;
int idx;
char *header;
int header_width;
};
struct data__file {
struct perf_session *session;
struct perf_data data;
int idx;
struct hists *hists;
struct evlist_streams *evlist_streams;
struct diff_hpp_fmt fmt[PERF_HPP_DIFF__MAX_INDEX];
};
static struct data__file *data__files;
static int data__files_cnt;
#define data__for_each_file_start(i, d, s) \
for (i = s, d = &data__files[s]; \
i < data__files_cnt; \
i++, d = &data__files[i])
#define data__for_each_file(i, d) data__for_each_file_start(i, d, 0)
#define data__for_each_file_new(i, d) data__for_each_file_start(i, d, 1)
static bool force;
static bool show_period;
static bool show_formula;
static bool show_baseline_only;
static bool cycles_hist;
static unsigned int sort_compute = 1;
static s64 compute_wdiff_w1;
static s64 compute_wdiff_w2;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
enum {
COMPUTE_DELTA,
COMPUTE_RATIO,
COMPUTE_WEIGHTED_DIFF,
COMPUTE_DELTA_ABS,
COMPUTE_CYCLES,
COMPUTE_MAX,
	COMPUTE_STREAM,	/* Kept after COMPUTE_MAX so it never indexes the compute arrays */
};
const char *compute_names[COMPUTE_MAX] = {
[COMPUTE_DELTA] = "delta",
[COMPUTE_DELTA_ABS] = "delta-abs",
[COMPUTE_RATIO] = "ratio",
[COMPUTE_WEIGHTED_DIFF] = "wdiff",
[COMPUTE_CYCLES] = "cycles",
};
static int compute = COMPUTE_DELTA_ABS;
static int compute_2_hpp[COMPUTE_MAX] = {
[COMPUTE_DELTA] = PERF_HPP_DIFF__DELTA,
[COMPUTE_DELTA_ABS] = PERF_HPP_DIFF__DELTA_ABS,
[COMPUTE_RATIO] = PERF_HPP_DIFF__RATIO,
[COMPUTE_WEIGHTED_DIFF] = PERF_HPP_DIFF__WEIGHTED_DIFF,
[COMPUTE_CYCLES] = PERF_HPP_DIFF__CYCLES,
};
#define MAX_COL_WIDTH 70
static struct header_column {
const char *name;
int width;
} columns[PERF_HPP_DIFF__MAX_INDEX] = {
[PERF_HPP_DIFF__BASELINE] = {
.name = "Baseline",
},
[PERF_HPP_DIFF__PERIOD] = {
.name = "Period",
.width = 14,
},
[PERF_HPP_DIFF__PERIOD_BASELINE] = {
.name = "Base period",
.width = 14,
},
[PERF_HPP_DIFF__DELTA] = {
.name = "Delta",
.width = 7,
},
[PERF_HPP_DIFF__DELTA_ABS] = {
.name = "Delta Abs",
.width = 7,
},
[PERF_HPP_DIFF__RATIO] = {
.name = "Ratio",
.width = 14,
},
[PERF_HPP_DIFF__WEIGHTED_DIFF] = {
.name = "Weighted diff",
.width = 14,
},
[PERF_HPP_DIFF__FORMULA] = {
.name = "Formula",
.width = MAX_COL_WIDTH,
},
[PERF_HPP_DIFF__CYCLES] = {
.name = "[Program Block Range] Cycles Diff",
.width = 70,
},
[PERF_HPP_DIFF__CYCLES_HIST] = {
.name = "stddev/Hist",
.width = NUM_SPARKS + 9,
}
};
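/*
 * Parse the "w1,w2" weight pair passed as 'wdiff:w1,w2'; both weights
 * must be non-zero integers.
 */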
static int setup_compute_opt_wdiff(char *opt)
{
char *w1_str = opt;
char *w2_str;
int ret = -EINVAL;
if (!opt)
goto out;
w2_str = strchr(opt, ',');
if (!w2_str)
goto out;
*w2_str++ = 0x0;
if (!*w2_str)
goto out;
compute_wdiff_w1 = strtol(w1_str, NULL, 10);
compute_wdiff_w2 = strtol(w2_str, NULL, 10);
if (!compute_wdiff_w1 || !compute_wdiff_w2)
goto out;
pr_debug("compute wdiff w1(%" PRId64 ") w2(%" PRId64 ")\n",
compute_wdiff_w1, compute_wdiff_w2);
ret = 0;
out:
if (ret)
pr_err("Failed: wrong weight data, use 'wdiff:w1,w2'\n");
return ret;
}
static int setup_compute_opt(char *opt)
{
if (compute == COMPUTE_WEIGHTED_DIFF)
return setup_compute_opt_wdiff(opt);
if (opt) {
pr_err("Failed: extra option specified '%s'", opt);
return -EINVAL;
}
return 0;
}
static int setup_compute(const struct option *opt, const char *str,
int unset __maybe_unused)
{
int *cp = (int *) opt->value;
char *cstr = (char *) str;
char buf[50];
unsigned i;
char *option;
if (!str) {
*cp = COMPUTE_DELTA;
return 0;
}
option = strchr(str, ':');
if (option) {
unsigned len = option++ - str;
		/*
		 * The str data is not writable, so we need
		 * to use another buffer.
		 */
		/* No compute method name is this long. */
if (len >= sizeof(buf))
return -EINVAL;
strncpy(buf, str, len);
buf[len] = 0x0;
cstr = buf;
}
for (i = 0; i < COMPUTE_MAX; i++)
if (!strcmp(cstr, compute_names[i])) {
*cp = i;
return setup_compute_opt(option);
}
pr_err("Failed: '%s' is not computation method "
"(use 'delta','ratio' or 'wdiff')\n", str);
return -EINVAL;
}
static double period_percent(struct hist_entry *he, u64 period)
{
u64 total = hists__total_period(he->hists);
return (period * 100.0) / total;
}
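/*
 * Delta is the difference between the pair's and the baseline's share
 * of their respective total periods, in percentage points.
 */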
static double compute_delta(struct hist_entry *he, struct hist_entry *pair)
{
double old_percent = period_percent(he, he->stat.period);
double new_percent = period_percent(pair, pair->stat.period);
pair->diff.period_ratio_delta = new_percent - old_percent;
pair->diff.computed = true;
return pair->diff.period_ratio_delta;
}
static double compute_ratio(struct hist_entry *he, struct hist_entry *pair)
{
double old_period = he->stat.period ?: 1;
double new_period = pair->stat.period;
pair->diff.computed = true;
pair->diff.period_ratio = new_period / old_period;
return pair->diff.period_ratio;
}
static s64 compute_wdiff(struct hist_entry *he, struct hist_entry *pair)
{
u64 old_period = he->stat.period;
u64 new_period = pair->stat.period;
pair->diff.computed = true;
pair->diff.wdiff = new_period * compute_wdiff_w2 -
old_period * compute_wdiff_w1;
return pair->diff.wdiff;
}
static int formula_delta(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size)
{
u64 he_total = he->hists->stats.total_period;
u64 pair_total = pair->hists->stats.total_period;
if (symbol_conf.filter_relative) {
he_total = he->hists->stats.total_non_filtered_period;
pair_total = pair->hists->stats.total_non_filtered_period;
}
return scnprintf(buf, size,
"(%" PRIu64 " * 100 / %" PRIu64 ") - "
"(%" PRIu64 " * 100 / %" PRIu64 ")",
pair->stat.period, pair_total,
he->stat.period, he_total);
}
static int formula_ratio(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size)
{
double old_period = he->stat.period;
double new_period = pair->stat.period;
return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period);
}
static int formula_wdiff(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size)
{
u64 old_period = he->stat.period;
u64 new_period = pair->stat.period;
return scnprintf(buf, size,
"(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")",
new_period, compute_wdiff_w2, old_period, compute_wdiff_w1);
}
static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size)
{
switch (compute) {
case COMPUTE_DELTA:
case COMPUTE_DELTA_ABS:
return formula_delta(he, pair, buf, size);
case COMPUTE_RATIO:
return formula_ratio(he, pair, buf, size);
case COMPUTE_WEIGHTED_DIFF:
return formula_wdiff(he, pair, buf, size);
default:
BUG_ON(1);
}
return -1;
}
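/*
 * Block hist entries are embedded in a struct block_hist: allocate the
 * container and hand back the inner hist_entry, so block_hist_free()
 * can recover the container with container_of().
 */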
static void *block_hist_zalloc(size_t size)
{
struct block_hist *bh;
bh = zalloc(size + sizeof(*bh));
if (!bh)
return NULL;
return &bh->he;
}
static void block_hist_free(void *he)
{
struct block_hist *bh;
bh = container_of(he, struct block_hist, he);
hists__delete_entries(&bh->block_hists);
free(bh);
}
struct hist_entry_ops block_hist_ops = {
.new = block_hist_zalloc,
.free = block_hist_free,
};
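/*
 * Per-sample handler: depending on the compute mode, the sample feeds
 * block/cycle accounting, the stream callchain hists, or the plain
 * per-symbol hists; samples outside the --time ranges or the --cpu
 * list are skipped.
 */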
static int diff__process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct perf_diff *pdiff = container_of(tool, struct perf_diff, tool);
struct addr_location al;
struct hists *hists = evsel__hists(evsel);
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = sample,
.ops = &hist_iter_normal,
};
int ret = -1;
if (perf_time__ranges_skip_sample(pdiff->ptime_range, pdiff->range_num,
sample->time)) {
return 0;
}
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
ret = -1;
goto out;
}
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) {
ret = 0;
goto out;
}
switch (compute) {
case COMPUTE_CYCLES:
if (!hists__add_entry_ops(hists, &block_hist_ops, &al, NULL,
NULL, NULL, NULL, sample, true)) {
pr_warning("problem incrementing symbol period, "
"skipping event\n");
goto out;
}
hist__account_cycles(sample->branch_stack, &al, sample, false,
NULL);
break;
case COMPUTE_STREAM:
if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
NULL)) {
pr_debug("problem adding hist entry, skipping event\n");
goto out;
}
break;
default:
if (!hists__add_entry(hists, &al, NULL, NULL, NULL, NULL, sample,
true)) {
pr_warning("problem incrementing symbol period, "
"skipping event\n");
goto out;
}
}
/*
* The total_period is updated here before going to the output
* tree since normally only the baseline hists will call
* hists__output_resort() and precompute needs the total
* period in order to sort entries by percentage delta.
*/
hists->stats.total_period += sample->period;
if (!al.filtered)
hists->stats.total_non_filtered_period += sample->period;
ret = 0;
out:
addr_location__exit(&al);
return ret;
}
static struct perf_diff pdiff = {
.tool = {
.sample = diff__process_sample_event,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.comm = perf_event__process_comm,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
.namespaces = perf_event__process_namespaces,
.cgroup = perf_event__process_cgroup,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
};
static struct evsel *evsel_match(struct evsel *evsel,
struct evlist *evlist)
{
struct evsel *e;
evlist__for_each_entry(evlist, e) {
if (evsel__match2(evsel, e))
return e;
}
return NULL;
}
static void evlist__collapse_resort(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
struct hists *hists = evsel__hists(evsel);
hists__collapse_resort(hists, NULL);
}
}
static struct data__file *fmt_to_data_file(struct perf_hpp_fmt *fmt)
{
struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt);
void *ptr = dfmt - dfmt->idx;
struct data__file *d = container_of(ptr, struct data__file, fmt);
return d;
}
static struct hist_entry*
get_pair_data(struct hist_entry *he, struct data__file *d)
{
if (hist_entry__has_pairs(he)) {
struct hist_entry *pair;
list_for_each_entry(pair, &he->pairs.head, pairs.node)
if (pair->hists == d->hists)
return pair;
}
return NULL;
}
static struct hist_entry*
get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
{
struct data__file *d = fmt_to_data_file(&dfmt->fmt);
return get_pair_data(he, d);
}
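/*
 * For -b/--baseline-only: drop every baseline entry that has no pair in
 * any of the other data files.
 */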
static void hists__baseline_only(struct hists *hists)
{
struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
if (!hist_entry__next_pair(he)) {
rb_erase_cached(&he->rb_node_in, root);
hist_entry__delete(he);
}
}
}
static int64_t block_cycles_diff_cmp(struct hist_entry *left,
struct hist_entry *right)
{
bool pairs_left = hist_entry__has_pairs(left);
bool pairs_right = hist_entry__has_pairs(right);
s64 l, r;
if (!pairs_left && !pairs_right)
return 0;
l = llabs(left->diff.cycles);
r = llabs(right->diff.cycles);
return r - l;
}
static int64_t block_sort(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return block_cycles_diff_cmp(right, left);
}
static void init_block_hist(struct block_hist *bh)
{
__hists__init(&bh->block_hists, &bh->block_list);
perf_hpp_list__init(&bh->block_list);
INIT_LIST_HEAD(&bh->block_fmt.list);
INIT_LIST_HEAD(&bh->block_fmt.sort_list);
bh->block_fmt.cmp = block_info__cmp;
bh->block_fmt.sort = block_sort;
perf_hpp_list__register_sort_field(&bh->block_list,
&bh->block_fmt);
bh->valid = true;
}
static struct hist_entry *get_block_pair(struct hist_entry *he,
struct hists *hists_pair)
{
struct rb_root_cached *root = hists_pair->entries_in;
struct rb_node *next = rb_first_cached(root);
int64_t cmp;
while (next != NULL) {
struct hist_entry *he_pair = rb_entry(next, struct hist_entry,
rb_node_in);
next = rb_next(&he_pair->rb_node_in);
cmp = __block_info__cmp(he_pair, he);
if (!cmp)
return he_pair;
}
return NULL;
}
static void init_spark_values(unsigned long *svals, int num)
{
for (int i = 0; i < num; i++)
svals[i] = 0;
}
static void update_spark_value(unsigned long *svals, int num,
struct stats *stats, u64 val)
{
int n = stats->n;
if (n < num)
svals[n] = val;
}
static void compute_cycles_diff(struct hist_entry *he,
struct hist_entry *pair)
{
pair->diff.computed = true;
if (pair->block_info->num && he->block_info->num) {
pair->diff.cycles =
pair->block_info->cycles_aggr / pair->block_info->num_aggr -
he->block_info->cycles_aggr / he->block_info->num_aggr;
if (!cycles_hist)
return;
init_stats(&pair->diff.stats);
init_spark_values(pair->diff.svals, NUM_SPARKS);
for (int i = 0; i < pair->block_info->num; i++) {
u64 val;
if (i >= he->block_info->num || i >= NUM_SPARKS)
break;
val = llabs(pair->block_info->cycles_spark[i] -
he->block_info->cycles_spark[i]);
update_spark_value(pair->diff.svals, NUM_SPARKS,
&pair->diff.stats, val);
update_stats(&pair->diff.stats, val);
}
}
}
static void block_hists_match(struct hists *hists_base,
struct hists *hists_pair)
{
struct rb_root_cached *root = hists_base->entries_in;
struct rb_node *next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry,
rb_node_in);
struct hist_entry *pair = get_block_pair(he, hists_pair);
next = rb_next(&he->rb_node_in);
if (pair) {
hist_entry__add_pair(pair, he);
compute_cycles_diff(he, pair);
}
}
}
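/*
 * Walk the baseline hists and compute the selected metric (delta,
 * ratio, wdiff or cycles) against the paired entry of every
 * non-baseline file, so sorting can use the precomputed values.
 */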
static void hists__precompute(struct hists *hists)
{
struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
next = rb_first_cached(root);
while (next != NULL) {
struct block_hist *bh, *pair_bh;
struct hist_entry *he, *pair;
struct data__file *d;
int i;
he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
if (compute == COMPUTE_CYCLES) {
bh = container_of(he, struct block_hist, he);
init_block_hist(bh);
block_info__process_sym(he, bh, NULL, 0);
}
data__for_each_file_new(i, d) {
pair = get_pair_data(he, d);
if (!pair)
continue;
switch (compute) {
case COMPUTE_DELTA:
case COMPUTE_DELTA_ABS:
compute_delta(he, pair);
break;
case COMPUTE_RATIO:
compute_ratio(he, pair);
break;
case COMPUTE_WEIGHTED_DIFF:
compute_wdiff(he, pair);
break;
case COMPUTE_CYCLES:
pair_bh = container_of(pair, struct block_hist,
he);
init_block_hist(pair_bh);
block_info__process_sym(pair, pair_bh, NULL, 0);
bh = container_of(he, struct block_hist, he);
if (bh->valid && pair_bh->valid) {
block_hists_match(&bh->block_hists,
&pair_bh->block_hists);
hists__output_resort(&pair_bh->block_hists,
NULL);
}
break;
default:
BUG_ON(1);
}
}
}
}
static int64_t cmp_doubles(double l, double r)
{
if (l > r)
return -1;
else if (l < r)
return 1;
else
return 0;
}
static int64_t
__hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
int c)
{
switch (c) {
case COMPUTE_DELTA:
{
double l = left->diff.period_ratio_delta;
double r = right->diff.period_ratio_delta;
return cmp_doubles(l, r);
}
case COMPUTE_DELTA_ABS:
{
double l = fabs(left->diff.period_ratio_delta);
double r = fabs(right->diff.period_ratio_delta);
return cmp_doubles(l, r);
}
case COMPUTE_RATIO:
{
double l = left->diff.period_ratio;
double r = right->diff.period_ratio;
return cmp_doubles(l, r);
}
case COMPUTE_WEIGHTED_DIFF:
{
s64 l = left->diff.wdiff;
s64 r = right->diff.wdiff;
return r - l;
}
default:
BUG_ON(1);
}
return 0;
}
static int64_t
hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
int c, int sort_idx)
{
bool pairs_left = hist_entry__has_pairs(left);
bool pairs_right = hist_entry__has_pairs(right);
struct hist_entry *p_right, *p_left;
if (!pairs_left && !pairs_right)
return 0;
if (!pairs_left || !pairs_right)
return pairs_left ? -1 : 1;
p_left = get_pair_data(left, &data__files[sort_idx]);
p_right = get_pair_data(right, &data__files[sort_idx]);
if (!p_left && !p_right)
return 0;
if (!p_left || !p_right)
return p_left ? -1 : 1;
/*
	 * We have two entries of the same kind, so
	 * do the data comparison.
*/
return __hist_entry__cmp_compute(p_left, p_right, c);
}
static int64_t
hist_entry__cmp_compute_idx(struct hist_entry *left, struct hist_entry *right,
int c, int sort_idx)
{
struct hist_entry *p_right, *p_left;
p_left = get_pair_data(left, &data__files[sort_idx]);
p_right = get_pair_data(right, &data__files[sort_idx]);
if (!p_left && !p_right)
return 0;
if (!p_left || !p_right)
return p_left ? -1 : 1;
if (c != COMPUTE_DELTA && c != COMPUTE_DELTA_ABS) {
/*
* The delta can be computed without the baseline, but
		 * the others cannot be. Put the entries which have no
* values below.
*/
if (left->dummy && right->dummy)
return 0;
if (left->dummy || right->dummy)
return left->dummy ? 1 : -1;
}
return __hist_entry__cmp_compute(p_left, p_right, c);
}
static int64_t
hist_entry__cmp_nop(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left __maybe_unused,
struct hist_entry *right __maybe_unused)
{
return 0;
}
static int64_t
hist_entry__cmp_baseline(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
if (left->stat.period == right->stat.period)
return 0;
return left->stat.period > right->stat.period ? 1 : -1;
}
static int64_t
hist_entry__cmp_delta(struct perf_hpp_fmt *fmt,
struct hist_entry *left, struct hist_entry *right)
{
struct data__file *d = fmt_to_data_file(fmt);
return hist_entry__cmp_compute(right, left, COMPUTE_DELTA, d->idx);
}
static int64_t
hist_entry__cmp_delta_abs(struct perf_hpp_fmt *fmt,
struct hist_entry *left, struct hist_entry *right)
{
struct data__file *d = fmt_to_data_file(fmt);
return hist_entry__cmp_compute(right, left, COMPUTE_DELTA_ABS, d->idx);
}
static int64_t
hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt,
struct hist_entry *left, struct hist_entry *right)
{
struct data__file *d = fmt_to_data_file(fmt);
return hist_entry__cmp_compute(right, left, COMPUTE_RATIO, d->idx);
}
static int64_t
hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt,
struct hist_entry *left, struct hist_entry *right)
{
struct data__file *d = fmt_to_data_file(fmt);
return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, d->idx);
}
static int64_t
hist_entry__cmp_delta_idx(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA,
sort_compute);
}
static int64_t
hist_entry__cmp_delta_abs_idx(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA_ABS,
sort_compute);
}
static int64_t
hist_entry__cmp_ratio_idx(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return hist_entry__cmp_compute_idx(right, left, COMPUTE_RATIO,
sort_compute);
}
static int64_t
hist_entry__cmp_wdiff_idx(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return hist_entry__cmp_compute_idx(right, left, COMPUTE_WEIGHTED_DIFF,
sort_compute);
}
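/* Filter, precompute, resort and print the baseline hists. */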
static void hists__process(struct hists *hists)
{
if (show_baseline_only)
hists__baseline_only(hists);
hists__precompute(hists);
hists__output_resort(hists, NULL);
if (compute == COMPUTE_CYCLES)
symbol_conf.report_block = true;
hists__fprintf(hists, !quiet, 0, 0, 0, stdout,
!symbol_conf.use_callchain);
}
static void data__fprintf(void)
{
struct data__file *d;
int i;
fprintf(stdout, "# Data files:\n");
data__for_each_file(i, d)
fprintf(stdout, "# [%d] %s %s\n",
d->idx, d->data.path,
!d->idx ? "(Baseline)" : "");
fprintf(stdout, "#\n");
}
static void data_process(void)
{
struct evlist *evlist_base = data__files[0].session->evlist;
struct evsel *evsel_base;
bool first = true;
evlist__for_each_entry(evlist_base, evsel_base) {
struct hists *hists_base = evsel__hists(evsel_base);
struct data__file *d;
int i;
data__for_each_file_new(i, d) {
struct evlist *evlist = d->session->evlist;
struct evsel *evsel;
struct hists *hists;
evsel = evsel_match(evsel_base, evlist);
if (!evsel)
continue;
hists = evsel__hists(evsel);
d->hists = hists;
hists__match(hists_base, hists);
if (!show_baseline_only)
hists__link(hists_base, hists);
}
if (!quiet) {
fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n",
evsel__name(evsel_base));
}
first = false;
if (verbose > 0 || ((data__files_cnt > 2) && !quiet))
data__fprintf();
/* Don't sort callchain for perf diff */
evsel__reset_sample_bit(evsel_base, CALLCHAIN);
hists__process(hists_base);
}
}
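/*
 * Match each baseline evsel with its counterpart in the pair session,
 * then match and report the hot callchain streams of the two.
 */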
static int process_base_stream(struct data__file *data_base,
struct data__file *data_pair,
const char *title __maybe_unused)
{
struct evlist *evlist_base = data_base->session->evlist;
struct evlist *evlist_pair = data_pair->session->evlist;
struct evsel *evsel_base, *evsel_pair;
struct evsel_streams *es_base, *es_pair;
evlist__for_each_entry(evlist_base, evsel_base) {
evsel_pair = evsel_match(evsel_base, evlist_pair);
if (!evsel_pair)
continue;
es_base = evsel_streams__entry(data_base->evlist_streams,
evsel_base->core.idx);
if (!es_base)
return -1;
es_pair = evsel_streams__entry(data_pair->evlist_streams,
evsel_pair->core.idx);
if (!es_pair)
return -1;
evsel_streams__match(es_base, es_pair);
evsel_streams__report(es_base, es_pair);
}
return 0;
}
static void stream_process(void)
{
/*
	 * Stream comparison only supports two data files:
* perf.data.old and perf.data. data__files[0] is perf.data.old,
* data__files[1] is perf.data.
*/
process_base_stream(&data__files[0], &data__files[1],
"# Output based on old perf data:\n#\n");
}
static void data__free(struct data__file *d)
{
int col;
if (d->evlist_streams)
evlist_streams__delete(d->evlist_streams);
for (col = 0; col < PERF_HPP_DIFF__MAX_INDEX; col++) {
struct diff_hpp_fmt *fmt = &d->fmt[col];
zfree(&fmt->header);
}
}
static int abstime_str_dup(char **pstr)
{
char *str = NULL;
if (pdiff.time_str && strchr(pdiff.time_str, ':')) {
str = strdup(pdiff.time_str);
if (!str)
return -ENOMEM;
}
*pstr = str;
return 0;
}
static int parse_absolute_time(struct data__file *d, char **pstr)
{
char *p = *pstr;
int ret;
/*
* Absolute timestamp for one file has the format: a.b,c.d
* For multiple files, the format is: a.b,c.d:a.b,c.d
*/
p = strchr(*pstr, ':');
if (p) {
if (p == *pstr) {
pr_err("Invalid time string\n");
return -EINVAL;
}
*p = 0;
p++;
if (*p == 0) {
pr_err("Invalid time string\n");
return -EINVAL;
}
}
ret = perf_time__parse_for_ranges(*pstr, d->session,
&pdiff.ptime_range,
&pdiff.range_size,
&pdiff.range_num);
if (ret < 0)
return ret;
if (!p || *p == 0)
*pstr = NULL;
else
*pstr = p;
return ret;
}
static int parse_percent_time(struct data__file *d)
{
int ret;
ret = perf_time__parse_for_ranges(pdiff.time_str, d->session,
&pdiff.ptime_range,
&pdiff.range_size,
&pdiff.range_num);
return ret;
}
static int parse_time_str(struct data__file *d, char *abstime_ostr,
char **pabstime_tmp)
{
int ret = 0;
if (abstime_ostr)
ret = parse_absolute_time(d, pabstime_tmp);
else if (pdiff.time_str)
ret = parse_percent_time(d);
return ret;
}
static int check_file_brstack(void)
{
struct data__file *d;
bool has_br_stack;
int i;
data__for_each_file(i, d) {
d->session = perf_session__new(&d->data, &pdiff.tool);
if (IS_ERR(d->session)) {
pr_err("Failed to open %s\n", d->data.path);
return PTR_ERR(d->session);
}
has_br_stack = perf_header__has_feat(&d->session->header,
HEADER_BRANCH_STACK);
perf_session__delete(d->session);
if (!has_br_stack)
return 0;
}
	/* Set the flag only when all files have branch stacks. */
pdiff.has_br_stack = true;
return 0;
}
static int __cmd_diff(void)
{
struct data__file *d;
int ret, i;
char *abstime_ostr, *abstime_tmp;
ret = abstime_str_dup(&abstime_ostr);
if (ret)
return ret;
abstime_tmp = abstime_ostr;
ret = -EINVAL;
data__for_each_file(i, d) {
d->session = perf_session__new(&d->data, &pdiff.tool);
if (IS_ERR(d->session)) {
ret = PTR_ERR(d->session);
pr_err("Failed to open %s\n", d->data.path);
goto out_delete;
}
if (pdiff.time_str) {
ret = parse_time_str(d, abstime_ostr, &abstime_tmp);
if (ret < 0)
goto out_delete;
}
if (cpu_list) {
ret = perf_session__cpu_bitmap(d->session, cpu_list,
cpu_bitmap);
if (ret < 0)
goto out_delete;
}
ret = perf_session__process_events(d->session);
if (ret) {
pr_err("Failed to process %s\n", d->data.path);
goto out_delete;
}
evlist__collapse_resort(d->session->evlist);
if (pdiff.ptime_range)
zfree(&pdiff.ptime_range);
if (compute == COMPUTE_STREAM) {
d->evlist_streams = evlist__create_streams(
d->session->evlist, 5);
if (!d->evlist_streams) {
ret = -ENOMEM;
goto out_delete;
}
}
}
if (compute == COMPUTE_STREAM)
stream_process();
else
data_process();
out_delete:
data__for_each_file(i, d) {
if (!IS_ERR(d->session))
perf_session__delete(d->session);
data__free(d);
}
free(data__files);
if (pdiff.ptime_range)
zfree(&pdiff.ptime_range);
if (abstime_ostr)
free(abstime_ostr);
return ret;
}
static const char * const diff_usage[] = {
"perf diff [<options>] [old_file] [new_file]",
NULL,
};
static const struct option options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
OPT_BOOLEAN('b', "baseline-only", &show_baseline_only,
"Show only items with match in baseline"),
OPT_CALLBACK('c', "compute", &compute,
"delta,delta-abs,ratio,wdiff:w1,w2 (default delta-abs),cycles",
"Entries differential computation selection",
setup_compute),
OPT_BOOLEAN('p', "period", &show_period,
"Show period values."),
OPT_BOOLEAN('F', "formula", &show_formula,
"Show formula."),
OPT_BOOLEAN(0, "cycles-hist", &cycles_hist,
"Show cycles histogram and standard deviation "
"- WARNING: use only with -c cycles."),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
"file", "kallsyms pathname"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only consider symbols in these comms"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
" Please refer the man page for the complete list."),
OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
OPT_CALLBACK(0, "symfs", NULL, "directory",
"Look for files with symbols relative to this directory",
symbol__config_symfs),
OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"How to display percentage of filtered entries", parse_filter_percentage),
OPT_STRING(0, "time", &pdiff.time_str, "str",
"Time span (time percent or absolute timestamp)"),
OPT_STRING(0, "cpu", &cpu_list, "cpu", "list of cpus to profile"),
OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
"only consider symbols in these pids"),
OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
"only consider symbols in these tids"),
OPT_BOOLEAN(0, "stream", &pdiff.stream,
"Enable hot streams comparison."),
OPT_END()
};
static double baseline_percent(struct hist_entry *he)
{
u64 total = hists__total_period(he->hists);
return 100.0 * he->stat.period / total;
}
static int hpp__color_baseline(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he)
{
struct diff_hpp_fmt *dfmt =
container_of(fmt, struct diff_hpp_fmt, fmt);
double percent = baseline_percent(he);
char pfmt[20] = " ";
if (!he->dummy) {
scnprintf(pfmt, 20, "%%%d.2f%%%%", dfmt->header_width - 1);
return percent_color_snprintf(hpp->buf, hpp->size,
pfmt, percent);
} else
return scnprintf(hpp->buf, hpp->size, "%*s",
dfmt->header_width, pfmt);
}
static int hpp__entry_baseline(struct hist_entry *he, char *buf, size_t size)
{
double percent = baseline_percent(he);
const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
int ret = 0;
if (!he->dummy)
ret = scnprintf(buf, size, fmt, percent);
return ret;
}
static int cycles_printf(struct hist_entry *he, struct hist_entry *pair,
struct perf_hpp *hpp, int width)
{
struct block_hist *bh = container_of(he, struct block_hist, he);
struct block_hist *bh_pair = container_of(pair, struct block_hist, he);
struct hist_entry *block_he;
struct block_info *bi;
char buf[128];
char *start_line, *end_line;
block_he = hists__get_entry(&bh_pair->block_hists, bh->block_idx);
if (!block_he) {
hpp->skip = true;
return 0;
}
/*
* Avoid printing the warning "addr2line_init failed for ..."
*/
symbol_conf.disable_add2line_warn = true;
bi = block_he->block_info;
start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
he->ms.sym);
end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
he->ms.sym);
if (start_line != SRCLINE_UNKNOWN &&
end_line != SRCLINE_UNKNOWN) {
scnprintf(buf, sizeof(buf), "[%s -> %s] %4ld",
start_line, end_line, block_he->diff.cycles);
} else {
scnprintf(buf, sizeof(buf), "[%7lx -> %7lx] %4ld",
bi->start, bi->end, block_he->diff.cycles);
}
zfree_srcline(&start_line);
zfree_srcline(&end_line);
return scnprintf(hpp->buf, hpp->size, "%*s", width, buf);
}
static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he,
int comparison_method)
{
struct diff_hpp_fmt *dfmt =
container_of(fmt, struct diff_hpp_fmt, fmt);
struct hist_entry *pair = get_pair_fmt(he, dfmt);
double diff;
s64 wdiff;
char pfmt[20] = " ";
if (!pair) {
if (comparison_method == COMPUTE_CYCLES) {
struct block_hist *bh;
bh = container_of(he, struct block_hist, he);
if (bh->block_idx)
hpp->skip = true;
}
goto no_print;
}
switch (comparison_method) {
case COMPUTE_DELTA:
if (pair->diff.computed)
diff = pair->diff.period_ratio_delta;
else
diff = compute_delta(he, pair);
scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1);
return percent_color_snprintf(hpp->buf, hpp->size,
pfmt, diff);
case COMPUTE_RATIO:
if (he->dummy)
goto dummy_print;
if (pair->diff.computed)
diff = pair->diff.period_ratio;
else
diff = compute_ratio(he, pair);
scnprintf(pfmt, 20, "%%%d.6f", dfmt->header_width);
return value_color_snprintf(hpp->buf, hpp->size,
pfmt, diff);
case COMPUTE_WEIGHTED_DIFF:
if (he->dummy)
goto dummy_print;
if (pair->diff.computed)
wdiff = pair->diff.wdiff;
else
wdiff = compute_wdiff(he, pair);
scnprintf(pfmt, 20, "%%14ld", dfmt->header_width);
return color_snprintf(hpp->buf, hpp->size,
get_percent_color(wdiff),
pfmt, wdiff);
case COMPUTE_CYCLES:
return cycles_printf(he, pair, hpp, dfmt->header_width);
default:
BUG_ON(1);
}
dummy_print:
return scnprintf(hpp->buf, hpp->size, "%*s",
dfmt->header_width, "N/A");
no_print:
return scnprintf(hpp->buf, hpp->size, "%*s",
dfmt->header_width, pfmt);
}
static int hpp__color_delta(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he)
{
return __hpp__color_compare(fmt, hpp, he, COMPUTE_DELTA);
}
static int hpp__color_ratio(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he)
{
return __hpp__color_compare(fmt, hpp, he, COMPUTE_RATIO);
}
static int hpp__color_wdiff(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he)
{
return __hpp__color_compare(fmt, hpp, he, COMPUTE_WEIGHTED_DIFF);
}
static int hpp__color_cycles(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he)
{
return __hpp__color_compare(fmt, hpp, he, COMPUTE_CYCLES);
}
static int all_zero(unsigned long *vals, int len)
{
int i;
for (i = 0; i < len; i++)
if (vals[i] != 0)
return 0;
return 1;
}
static int print_cycles_spark(char *bf, int size, unsigned long *svals, u64 n)
{
int printed;
if (n <= 1)
return 0;
if (n > NUM_SPARKS)
n = NUM_SPARKS;
if (all_zero(svals, n))
return 0;
printed = print_spark(bf, size, svals, n);
printed += scnprintf(bf + printed, size - printed, " ");
return printed;
}
static int hpp__color_cycles_hist(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he)
{
struct diff_hpp_fmt *dfmt =
container_of(fmt, struct diff_hpp_fmt, fmt);
struct hist_entry *pair = get_pair_fmt(he, dfmt);
struct block_hist *bh = container_of(he, struct block_hist, he);
struct block_hist *bh_pair;
struct hist_entry *block_he;
char spark[32], buf[128];
double r;
int ret, pad;
if (!pair) {
if (bh->block_idx)
hpp->skip = true;
goto no_print;
}
bh_pair = container_of(pair, struct block_hist, he);
block_he = hists__get_entry(&bh_pair->block_hists, bh->block_idx);
if (!block_he) {
hpp->skip = true;
goto no_print;
}
ret = print_cycles_spark(spark, sizeof(spark), block_he->diff.svals,
block_he->diff.stats.n);
r = rel_stddev_stats(stddev_stats(&block_he->diff.stats),
avg_stats(&block_he->diff.stats));
if (ret) {
/*
		 * Pad with spaces if the number of sparks is less than
		 * NUM_SPARKS, otherwise the output is not aligned.
*/
pad = NUM_SPARKS - ((ret - 1) / 3);
scnprintf(buf, sizeof(buf), "%s%5.1f%% %s", "\u00B1", r, spark);
ret = scnprintf(hpp->buf, hpp->size, "%*s",
dfmt->header_width, buf);
if (pad) {
ret += scnprintf(hpp->buf + ret, hpp->size - ret,
"%-*s", pad, " ");
}
return ret;
}
no_print:
return scnprintf(hpp->buf, hpp->size, "%*s",
dfmt->header_width, " ");
}
static void
hpp__entry_unpair(struct hist_entry *he, int idx, char *buf, size_t size)
{
switch (idx) {
case PERF_HPP_DIFF__PERIOD_BASELINE:
scnprintf(buf, size, "%" PRIu64, he->stat.period);
break;
default:
break;
}
}
static void
hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
int idx, char *buf, size_t size)
{
double diff;
double ratio;
s64 wdiff;
switch (idx) {
case PERF_HPP_DIFF__DELTA:
case PERF_HPP_DIFF__DELTA_ABS:
if (pair->diff.computed)
diff = pair->diff.period_ratio_delta;
else
diff = compute_delta(he, pair);
scnprintf(buf, size, "%+4.2F%%", diff);
break;
case PERF_HPP_DIFF__RATIO:
		/* No point printing a ratio for a dummy entry. */
if (he->dummy) {
scnprintf(buf, size, "N/A");
break;
}
if (pair->diff.computed)
ratio = pair->diff.period_ratio;
else
ratio = compute_ratio(he, pair);
if (ratio > 0.0)
scnprintf(buf, size, "%14.6F", ratio);
break;
case PERF_HPP_DIFF__WEIGHTED_DIFF:
		/* No point printing a wdiff for a dummy entry. */
if (he->dummy) {
scnprintf(buf, size, "N/A");
break;
}
if (pair->diff.computed)
wdiff = pair->diff.wdiff;
else
wdiff = compute_wdiff(he, pair);
if (wdiff != 0)
scnprintf(buf, size, "%14ld", wdiff);
break;
case PERF_HPP_DIFF__FORMULA:
formula_fprintf(he, pair, buf, size);
break;
case PERF_HPP_DIFF__PERIOD:
scnprintf(buf, size, "%" PRIu64, pair->stat.period);
break;
default:
BUG_ON(1);
}
}
static void
__hpp__entry_global(struct hist_entry *he, struct diff_hpp_fmt *dfmt,
char *buf, size_t size)
{
struct hist_entry *pair = get_pair_fmt(he, dfmt);
int idx = dfmt->idx;
/* baseline is special */
if (idx == PERF_HPP_DIFF__BASELINE)
hpp__entry_baseline(he, buf, size);
else {
if (pair)
hpp__entry_pair(he, pair, idx, buf, size);
else
hpp__entry_unpair(he, idx, buf, size);
}
}
static int hpp__entry_global(struct perf_hpp_fmt *_fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct diff_hpp_fmt *dfmt =
container_of(_fmt, struct diff_hpp_fmt, fmt);
char buf[MAX_COL_WIDTH] = " ";
__hpp__entry_global(he, dfmt, buf, MAX_COL_WIDTH);
if (symbol_conf.field_sep)
return scnprintf(hpp->buf, hpp->size, "%s", buf);
else
return scnprintf(hpp->buf, hpp->size, "%*s",
dfmt->header_width, buf);
}
static int hpp__header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hists *hists __maybe_unused,
int line __maybe_unused,
int *span __maybe_unused)
{
struct diff_hpp_fmt *dfmt =
container_of(fmt, struct diff_hpp_fmt, fmt);
BUG_ON(!dfmt->header);
return scnprintf(hpp->buf, hpp->size, dfmt->header);
}
static int hpp__width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct hists *hists __maybe_unused)
{
struct diff_hpp_fmt *dfmt =
container_of(fmt, struct diff_hpp_fmt, fmt);
BUG_ON(dfmt->header_width <= 0);
return dfmt->header_width;
}
static void init_header(struct data__file *d, struct diff_hpp_fmt *dfmt)
{
#define MAX_HEADER_NAME 100
char buf_indent[MAX_HEADER_NAME];
char buf[MAX_HEADER_NAME];
const char *header = NULL;
int width = 0;
BUG_ON(dfmt->idx >= PERF_HPP_DIFF__MAX_INDEX);
header = columns[dfmt->idx].name;
width = columns[dfmt->idx].width;
/* Only our defined HPP fmts should appear here. */
BUG_ON(!header);
if (data__files_cnt > 2)
scnprintf(buf, MAX_HEADER_NAME, "%s/%d", header, d->idx);
#define NAME (data__files_cnt > 2 ? buf : header)
dfmt->header_width = width;
width = (int) strlen(NAME);
if (dfmt->header_width < width)
dfmt->header_width = width;
scnprintf(buf_indent, MAX_HEADER_NAME, "%*s",
dfmt->header_width, NAME);
dfmt->header = strdup(buf_indent);
#undef MAX_HEADER_NAME
#undef NAME
}
static void data__hpp_register(struct data__file *d, int idx)
{
struct diff_hpp_fmt *dfmt = &d->fmt[idx];
struct perf_hpp_fmt *fmt = &dfmt->fmt;
dfmt->idx = idx;
fmt->header = hpp__header;
fmt->width = hpp__width;
fmt->entry = hpp__entry_global;
fmt->cmp = hist_entry__cmp_nop;
fmt->collapse = hist_entry__cmp_nop;
/* TODO more colors */
switch (idx) {
case PERF_HPP_DIFF__BASELINE:
fmt->color = hpp__color_baseline;
fmt->sort = hist_entry__cmp_baseline;
break;
case PERF_HPP_DIFF__DELTA:
fmt->color = hpp__color_delta;
fmt->sort = hist_entry__cmp_delta;
break;
case PERF_HPP_DIFF__RATIO:
fmt->color = hpp__color_ratio;
fmt->sort = hist_entry__cmp_ratio;
break;
case PERF_HPP_DIFF__WEIGHTED_DIFF:
fmt->color = hpp__color_wdiff;
fmt->sort = hist_entry__cmp_wdiff;
break;
case PERF_HPP_DIFF__DELTA_ABS:
fmt->color = hpp__color_delta;
fmt->sort = hist_entry__cmp_delta_abs;
break;
case PERF_HPP_DIFF__CYCLES:
fmt->color = hpp__color_cycles;
fmt->sort = hist_entry__cmp_nop;
break;
case PERF_HPP_DIFF__CYCLES_HIST:
fmt->color = hpp__color_cycles_hist;
fmt->sort = hist_entry__cmp_nop;
break;
default:
fmt->sort = hist_entry__cmp_nop;
break;
}
init_header(d, dfmt);
perf_hpp__column_register(fmt);
perf_hpp__register_sort_field(fmt);
}
static int ui_init(void)
{
struct data__file *d;
struct perf_hpp_fmt *fmt;
int i;
data__for_each_file(i, d) {
/*
* Baseline or compute related columns:
*
* PERF_HPP_DIFF__BASELINE
* PERF_HPP_DIFF__DELTA
* PERF_HPP_DIFF__RATIO
* PERF_HPP_DIFF__WEIGHTED_DIFF
* PERF_HPP_DIFF__CYCLES
*/
data__hpp_register(d, i ? compute_2_hpp[compute] :
PERF_HPP_DIFF__BASELINE);
if (cycles_hist && i)
data__hpp_register(d, PERF_HPP_DIFF__CYCLES_HIST);
/*
* And the rest:
*
* PERF_HPP_DIFF__FORMULA
* PERF_HPP_DIFF__PERIOD
* PERF_HPP_DIFF__PERIOD_BASELINE
*/
if (show_formula && i)
data__hpp_register(d, PERF_HPP_DIFF__FORMULA);
if (show_period)
data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD :
PERF_HPP_DIFF__PERIOD_BASELINE);
}
if (!sort_compute)
return 0;
/*
* Prepend an fmt to sort on columns at 'sort_compute' first.
* This fmt is added only to the sort list but not to the
* output fields list.
*
* Note that this column (data) can be compared twice - once
* for this 'sort_compute' fmt and again for the normal
* diff_hpp_fmt. But it shouldn't be a problem as most entries
* will be sorted out on the first try or by the baseline, and
* the comparison is not a costly operation.
*/
fmt = zalloc(sizeof(*fmt));
if (fmt == NULL) {
pr_err("Memory allocation failed\n");
return -1;
}
fmt->cmp = hist_entry__cmp_nop;
fmt->collapse = hist_entry__cmp_nop;
switch (compute) {
case COMPUTE_DELTA:
fmt->sort = hist_entry__cmp_delta_idx;
break;
case COMPUTE_RATIO:
fmt->sort = hist_entry__cmp_ratio_idx;
break;
case COMPUTE_WEIGHTED_DIFF:
fmt->sort = hist_entry__cmp_wdiff_idx;
break;
case COMPUTE_DELTA_ABS:
fmt->sort = hist_entry__cmp_delta_abs_idx;
break;
case COMPUTE_CYCLES:
/*
* Must be set since 'fmt->sort' is called without a
* validity check during sorting.
*/
fmt->sort = hist_entry__cmp_nop;
break;
default:
BUG_ON(1);
}
perf_hpp__prepend_sort_field(fmt);
return 0;
}
static int data_init(int argc, const char **argv)
{
struct data__file *d;
static const char *defaults[] = {
"perf.data.old",
"perf.data",
};
bool use_default = true;
int i;
data__files_cnt = 2;
if (argc) {
if (argc == 1)
defaults[1] = argv[0];
else {
data__files_cnt = argc;
use_default = false;
}
} else if (perf_guest) {
defaults[0] = "perf.data.host";
defaults[1] = "perf.data.guest";
}
if (sort_compute >= (unsigned int) data__files_cnt) {
pr_err("Order option out of limit.\n");
return -EINVAL;
}
data__files = zalloc(sizeof(*data__files) * data__files_cnt);
if (!data__files)
return -ENOMEM;
data__for_each_file(i, d) {
struct perf_data *data = &d->data;
data->path = use_default ? defaults[i] : argv[i];
data->mode = PERF_DATA_MODE_READ;
data->force = force;
d->idx = i;
}
return 0;
}
static int diff__config(const char *var, const char *value,
void *cb __maybe_unused)
{
if (!strcmp(var, "diff.order")) {
int ret;
if (perf_config_int(&ret, var, value) < 0)
return -1;
sort_compute = ret;
return 0;
}
if (!strcmp(var, "diff.compute")) {
if (!strcmp(value, "delta")) {
compute = COMPUTE_DELTA;
} else if (!strcmp(value, "delta-abs")) {
compute = COMPUTE_DELTA_ABS;
} else if (!strcmp(value, "ratio")) {
compute = COMPUTE_RATIO;
} else if (!strcmp(value, "wdiff")) {
compute = COMPUTE_WEIGHTED_DIFF;
} else {
pr_err("Invalid compute method: %s\n", value);
return -1;
}
}
return 0;
}
int cmd_diff(int argc, const char **argv)
{
int ret = hists__init();
if (ret < 0)
return ret;
perf_config(diff__config, NULL);
argc = parse_options(argc, argv, options, diff_usage, 0);
if (quiet)
perf_quiet_option();
if (cycles_hist && (compute != COMPUTE_CYCLES))
usage_with_options(diff_usage, options);
if (pdiff.stream)
compute = COMPUTE_STREAM;
symbol__annotation_init();
if (symbol__init(NULL) < 0)
return -1;
if (data_init(argc, argv) < 0)
return -1;
if (check_file_brstack() < 0)
return -1;
if ((compute == COMPUTE_CYCLES || compute == COMPUTE_STREAM)
&& !pdiff.has_br_stack) {
return -1;
}
if (compute == COMPUTE_STREAM) {
symbol_conf.show_branchflag_count = true;
symbol_conf.disable_add2line_warn = true;
callchain_param.mode = CHAIN_FLAT;
callchain_param.key = CCKEY_SRCLINE;
callchain_param.branch_callstack = 1;
symbol_conf.use_callchain = true;
callchain_register_param(&callchain_param);
sort_order = "srcline,symbol,dso";
} else {
if (ui_init() < 0)
return -1;
sort__mode = SORT_MODE__DIFF;
}
if (setup_sorting(NULL) < 0)
usage_with_options(diff_usage, options);
setup_pager();
sort__setup_elide(NULL);
return __cmd_diff();
}
| linux-master | tools/perf/builtin-diff.c |
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "util/dso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/config.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/callchain.h"
#include "util/time-utils.h"
#include <linux/err.h>
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"
#include "util/debug.h"
#include "util/string2.h"
#include "util/util.h"
#include <linux/kernel.h>
#include <linux/numa.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <regex.h>
#include <linux/ctype.h>
#include <traceevent/event-parse.h>
static int kmem_slab;
static int kmem_page;
static long kmem_page_size;
static enum {
KMEM_SLAB,
KMEM_PAGE,
} kmem_default = KMEM_SLAB; /* for backward compatibility */
struct alloc_stat;
typedef int (*sort_fn_t)(void *, void *);
static int alloc_flag;
static int caller_flag;
static int alloc_lines = -1;
static int caller_lines = -1;
static bool raw_ip;
struct alloc_stat {
u64 call_site;
u64 ptr;
u64 bytes_req;
u64 bytes_alloc;
u64 last_alloc;
u32 hit;
u32 pingpong;
short alloc_cpu;
struct rb_node node;
};
static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;
static unsigned long total_requested, total_allocated, total_freed;
static unsigned long nr_allocs, nr_cross_allocs;
/* filters for controlling start and stop of time of analysis */
static struct perf_time_interval ptime;
const char *time_str;
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
int bytes_req, int bytes_alloc, int cpu)
{
struct rb_node **node = &root_alloc_stat.rb_node;
struct rb_node *parent = NULL;
struct alloc_stat *data = NULL;
while (*node) {
parent = *node;
data = rb_entry(*node, struct alloc_stat, node);
if (ptr > data->ptr)
node = &(*node)->rb_right;
else if (ptr < data->ptr)
node = &(*node)->rb_left;
else
break;
}
if (data && data->ptr == ptr) {
data->hit++;
data->bytes_req += bytes_req;
data->bytes_alloc += bytes_alloc;
} else {
data = malloc(sizeof(*data));
if (!data) {
pr_err("%s: malloc failed\n", __func__);
return -1;
}
data->ptr = ptr;
data->pingpong = 0;
data->hit = 1;
data->bytes_req = bytes_req;
data->bytes_alloc = bytes_alloc;
rb_link_node(&data->node, parent, node);
rb_insert_color(&data->node, &root_alloc_stat);
}
data->call_site = call_site;
data->alloc_cpu = cpu;
data->last_alloc = bytes_alloc;
return 0;
}
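/*
* Both insert_alloc_stat() above and insert_caller_stat() below follow
* the usual rbtree find-or-insert idiom: walk the tree remembering the
* parent and link slot, update the existing node in place if the key
* matches, otherwise allocate a new node and link it at the slot found.
*/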
static int insert_caller_stat(unsigned long call_site,
int bytes_req, int bytes_alloc)
{
struct rb_node **node = &root_caller_stat.rb_node;
struct rb_node *parent = NULL;
struct alloc_stat *data = NULL;
while (*node) {
parent = *node;
data = rb_entry(*node, struct alloc_stat, node);
if (call_site > data->call_site)
node = &(*node)->rb_right;
else if (call_site < data->call_site)
node = &(*node)->rb_left;
else
break;
}
if (data && data->call_site == call_site) {
data->hit++;
data->bytes_req += bytes_req;
data->bytes_alloc += bytes_alloc;
} else {
data = malloc(sizeof(*data));
if (!data) {
pr_err("%s: malloc failed\n", __func__);
return -1;
}
data->call_site = call_site;
data->pingpong = 0;
data->hit = 1;
data->bytes_req = bytes_req;
data->bytes_alloc = bytes_alloc;
rb_link_node(&data->node, parent, node);
rb_insert_color(&data->node, &root_caller_stat);
}
return 0;
}
static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
unsigned long ptr = evsel__intval(evsel, sample, "ptr"),
call_site = evsel__intval(evsel, sample, "call_site");
int bytes_req = evsel__intval(evsel, sample, "bytes_req"),
bytes_alloc = evsel__intval(evsel, sample, "bytes_alloc");
if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
insert_caller_stat(call_site, bytes_req, bytes_alloc))
return -1;
total_requested += bytes_req;
total_allocated += bytes_alloc;
nr_allocs++;
/*
* Commit 11e9734bcb6a ("mm/slab_common: unify NUMA and UMA
* version of tracepoints") adds the field "node" into the
* tracepoints 'kmalloc' and 'kmem_cache_alloc'.
*
* The legacy tracepoints 'kmalloc_node' and 'kmem_cache_alloc_node'
* also contain the field "node".
*
* If the tracepoint contains the field "node", the tool counts
* cross-node allocations.
*/
if (evsel__field(evsel, "node")) {
int node1, node2;
node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu});
node2 = evsel__intval(evsel, sample, "node");
/*
* If the field "node" is NUMA_NO_NODE (-1), we don't take it
* as a cross allocation.
*/
if ((node2 != NUMA_NO_NODE) && (node1 != node2))
nr_cross_allocs++;
}
return 0;
}
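/*
* Illustration only: the cross-node check above, reduced to its core.
* A "cross allocation" is one whose memory node differs from the node
* of the CPU that requested it, with NUMA_NO_NODE meaning the node is
* unknown. demo_is_cross_alloc() is a hypothetical helper, not part of
* perf.
*/
#if 0 /* example sketch, not compiled */
static bool demo_is_cross_alloc(int cpu_node, int tp_node)
{
return tp_node != NUMA_NO_NODE && cpu_node != tp_node;
}
#endif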
static int ptr_cmp(void *, void *);
static int slab_callsite_cmp(void *, void *);
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
unsigned long call_site,
struct rb_root *root,
sort_fn_t sort_fn)
{
struct rb_node *node = root->rb_node;
struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
while (node) {
struct alloc_stat *data;
int cmp;
data = rb_entry(node, struct alloc_stat, node);
cmp = sort_fn(&key, data);
if (cmp < 0)
node = node->rb_left;
else if (cmp > 0)
node = node->rb_right;
else
return data;
}
return NULL;
}
static int evsel__process_free_event(struct evsel *evsel, struct perf_sample *sample)
{
unsigned long ptr = evsel__intval(evsel, sample, "ptr");
struct alloc_stat *s_alloc, *s_caller;
s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
if (!s_alloc)
return 0;
total_freed += s_alloc->last_alloc;
if ((short)sample->cpu != s_alloc->alloc_cpu) {
s_alloc->pingpong++;
s_caller = search_alloc_stat(0, s_alloc->call_site,
&root_caller_stat,
slab_callsite_cmp);
if (!s_caller)
return -1;
s_caller->pingpong++;
}
s_alloc->alloc_cpu = -1;
return 0;
}
static u64 total_page_alloc_bytes;
static u64 total_page_free_bytes;
static u64 total_page_nomatch_bytes;
static u64 total_page_fail_bytes;
static unsigned long nr_page_allocs;
static unsigned long nr_page_frees;
static unsigned long nr_page_fails;
static unsigned long nr_page_nomatch;
static bool use_pfn;
static bool live_page;
static struct perf_session *kmem_session;
#define MAX_MIGRATE_TYPES 6
#define MAX_PAGE_ORDER 11
static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
struct page_stat {
struct rb_node node;
u64 page;
u64 callsite;
int order;
unsigned gfp_flags;
unsigned migrate_type;
u64 alloc_bytes;
u64 free_bytes;
int nr_alloc;
int nr_free;
};
static struct rb_root page_live_tree;
static struct rb_root page_alloc_tree;
static struct rb_root page_alloc_sorted;
static struct rb_root page_caller_tree;
static struct rb_root page_caller_sorted;
struct alloc_func {
u64 start;
u64 end;
char *name;
};
static int nr_alloc_funcs;
static struct alloc_func *alloc_func_list;
static int funcmp(const void *a, const void *b)
{
const struct alloc_func *fa = a;
const struct alloc_func *fb = b;
if (fa->start > fb->start)
return 1;
else
return -1;
}
static int callcmp(const void *a, const void *b)
{
const struct alloc_func *fa = a;
const struct alloc_func *fb = b;
if (fb->start <= fa->start && fa->end < fb->end)
return 0;
if (fa->start > fb->start)
return 1;
else
return -1;
}
static int build_alloc_func_list(void)
{
int ret;
struct map *kernel_map;
struct symbol *sym;
struct rb_node *node;
struct alloc_func *func;
struct machine *machine = &kmem_session->machines.host;
regex_t alloc_func_regex;
static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
if (ret) {
char err[BUFSIZ];
regerror(ret, &alloc_func_regex, err, sizeof(err));
pr_err("Invalid regex: %s\n%s", pattern, err);
return -EINVAL;
}
kernel_map = machine__kernel_map(machine);
if (map__load(kernel_map) < 0) {
pr_err("cannot load kernel map\n");
return -ENOENT;
}
map__for_each_symbol(kernel_map, sym, node) {
if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
continue;
func = realloc(alloc_func_list,
(nr_alloc_funcs + 1) * sizeof(*func));
if (func == NULL)
return -ENOMEM;
pr_debug("alloc func: %s\n", sym->name);
func[nr_alloc_funcs].start = sym->start;
func[nr_alloc_funcs].end = sym->end;
func[nr_alloc_funcs].name = sym->name;
alloc_func_list = func;
nr_alloc_funcs++;
}
qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
regfree(&alloc_func_regex);
return 0;
}
/*
* Find the first non-memory-allocation function in the callchain.
* The allocation functions are listed in 'alloc_func_list'.
*/
static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
{
struct addr_location al;
struct machine *machine = &kmem_session->machines.host;
struct callchain_cursor_node *node;
struct callchain_cursor *cursor;
u64 result = sample->ip;
addr_location__init(&al);
if (alloc_func_list == NULL) {
if (build_alloc_func_list() < 0)
goto out;
}
al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
cursor = get_tls_callchain_cursor();
if (cursor == NULL)
goto out;
sample__resolve_callchain(sample, cursor, NULL, evsel, &al, 16);
callchain_cursor_commit(cursor);
while (true) {
struct alloc_func key, *caller;
u64 addr;
node = callchain_cursor_current(cursor);
if (node == NULL)
break;
key.start = key.end = node->ip;
caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
sizeof(key), callcmp);
if (!caller) {
/* found */
if (node->ms.map)
addr = map__dso_unmap_ip(node->ms.map, node->ip);
else
addr = node->ip;
result = addr;
goto out;
} else
pr_debug3("skipping alloc function: %s\n", caller->name);
callchain_cursor_advance(cursor);
}
pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
out:
addr_location__exit(&al);
return result;
}
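/*
* Illustration only: find_callsite() uses bsearch() as a range lookup.
* The key is a degenerate range (start == end == ip) and callcmp()
* returns 0 when that ip falls inside a function's [start, end) range,
* so the search answers "is this ip inside any allocator function?".
* demo_ip_in_alloc_func() is a hypothetical helper, not part of perf.
*/
#if 0 /* example sketch, not compiled */
static bool demo_ip_in_alloc_func(u64 ip)
{
struct alloc_func key = { .start = ip, .end = ip };
return bsearch(&key, alloc_func_list, nr_alloc_funcs,
sizeof(key), callcmp) != NULL;
}
#endif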
struct sort_dimension {
const char name[20];
sort_fn_t cmp;
struct list_head list;
};
static LIST_HEAD(page_alloc_sort_input);
static LIST_HEAD(page_caller_sort_input);
static struct page_stat *
__page_stat__findnew_page(struct page_stat *pstat, bool create)
{
struct rb_node **node = &page_live_tree.rb_node;
struct rb_node *parent = NULL;
struct page_stat *data;
while (*node) {
s64 cmp;
parent = *node;
data = rb_entry(*node, struct page_stat, node);
cmp = data->page - pstat->page;
if (cmp < 0)
node = &parent->rb_left;
else if (cmp > 0)
node = &parent->rb_right;
else
return data;
}
if (!create)
return NULL;
data = zalloc(sizeof(*data));
if (data != NULL) {
data->page = pstat->page;
data->order = pstat->order;
data->gfp_flags = pstat->gfp_flags;
data->migrate_type = pstat->migrate_type;
rb_link_node(&data->node, parent, node);
rb_insert_color(&data->node, &page_live_tree);
}
return data;
}
static struct page_stat *page_stat__find_page(struct page_stat *pstat)
{
return __page_stat__findnew_page(pstat, false);
}
static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
{
return __page_stat__findnew_page(pstat, true);
}
static struct page_stat *
__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
{
struct rb_node **node = &page_alloc_tree.rb_node;
struct rb_node *parent = NULL;
struct page_stat *data;
struct sort_dimension *sort;
while (*node) {
int cmp = 0;
parent = *node;
data = rb_entry(*node, struct page_stat, node);
list_for_each_entry(sort, &page_alloc_sort_input, list) {
cmp = sort->cmp(pstat, data);
if (cmp)
break;
}
if (cmp < 0)
node = &parent->rb_left;
else if (cmp > 0)
node = &parent->rb_right;
else
return data;
}
if (!create)
return NULL;
data = zalloc(sizeof(*data));
if (data != NULL) {
data->page = pstat->page;
data->order = pstat->order;
data->gfp_flags = pstat->gfp_flags;
data->migrate_type = pstat->migrate_type;
rb_link_node(&data->node, parent, node);
rb_insert_color(&data->node, &page_alloc_tree);
}
return data;
}
static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
{
return __page_stat__findnew_alloc(pstat, false);
}
static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
{
return __page_stat__findnew_alloc(pstat, true);
}
static struct page_stat *
__page_stat__findnew_caller(struct page_stat *pstat, bool create)
{
struct rb_node **node = &page_caller_tree.rb_node;
struct rb_node *parent = NULL;
struct page_stat *data;
struct sort_dimension *sort;
while (*node) {
int cmp = 0;
parent = *node;
data = rb_entry(*node, struct page_stat, node);
list_for_each_entry(sort, &page_caller_sort_input, list) {
cmp = sort->cmp(pstat, data);
if (cmp)
break;
}
if (cmp < 0)
node = &parent->rb_left;
else if (cmp > 0)
node = &parent->rb_right;
else
return data;
}
if (!create)
return NULL;
data = zalloc(sizeof(*data));
if (data != NULL) {
data->callsite = pstat->callsite;
data->order = pstat->order;
data->gfp_flags = pstat->gfp_flags;
data->migrate_type = pstat->migrate_type;
rb_link_node(&data->node, parent, node);
rb_insert_color(&data->node, &page_caller_tree);
}
return data;
}
static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
{
return __page_stat__findnew_caller(pstat, false);
}
static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
{
return __page_stat__findnew_caller(pstat, true);
}
static bool valid_page(u64 pfn_or_page)
{
if (use_pfn && pfn_or_page == -1UL)
return false;
if (!use_pfn && pfn_or_page == 0)
return false;
return true;
}
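/*
* Note: a failed allocation is reported differently depending on the
* tracepoint format -- pfn-based events use -1 for "no page" while
* struct-page based events carry a NULL (0) pointer, hence the two
* separate checks in valid_page() above.
*/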
struct gfp_flag {
unsigned int flags;
char *compact_str;
char *human_readable;
};
static struct gfp_flag *gfps;
static int nr_gfps;
static int gfpcmp(const void *a, const void *b)
{
const struct gfp_flag *fa = a;
const struct gfp_flag *fb = b;
return fa->flags - fb->flags;
}
/* see include/trace/events/mmflags.h */
static const struct {
const char *original;
const char *compact;
} gfp_compact_table[] = {
{ "GFP_TRANSHUGE", "THP" },
{ "GFP_TRANSHUGE_LIGHT", "THL" },
{ "GFP_HIGHUSER_MOVABLE", "HUM" },
{ "GFP_HIGHUSER", "HU" },
{ "GFP_USER", "U" },
{ "GFP_KERNEL_ACCOUNT", "KAC" },
{ "GFP_KERNEL", "K" },
{ "GFP_NOFS", "NF" },
{ "GFP_ATOMIC", "A" },
{ "GFP_NOIO", "NI" },
{ "GFP_NOWAIT", "NW" },
{ "GFP_DMA", "D" },
{ "__GFP_HIGHMEM", "HM" },
{ "GFP_DMA32", "D32" },
{ "__GFP_HIGH", "H" },
{ "__GFP_IO", "I" },
{ "__GFP_FS", "F" },
{ "__GFP_NOWARN", "NWR" },
{ "__GFP_RETRY_MAYFAIL", "R" },
{ "__GFP_NOFAIL", "NF" },
{ "__GFP_NORETRY", "NR" },
{ "__GFP_COMP", "C" },
{ "__GFP_ZERO", "Z" },
{ "__GFP_NOMEMALLOC", "NMA" },
{ "__GFP_MEMALLOC", "MA" },
{ "__GFP_HARDWALL", "HW" },
{ "__GFP_THISNODE", "TN" },
{ "__GFP_RECLAIMABLE", "RC" },
{ "__GFP_MOVABLE", "M" },
{ "__GFP_ACCOUNT", "AC" },
{ "__GFP_WRITE", "WR" },
{ "__GFP_RECLAIM", "R" },
{ "__GFP_DIRECT_RECLAIM", "DR" },
{ "__GFP_KSWAPD_RECLAIM", "KR" },
};
static size_t max_gfp_len;
static char *compact_gfp_flags(char *gfp_flags)
{
char *orig_flags = strdup(gfp_flags);
char *new_flags = NULL;
char *str, *pos = NULL;
size_t len = 0;
if (orig_flags == NULL)
return NULL;
str = strtok_r(orig_flags, "|", &pos);
while (str) {
size_t i;
char *new;
const char *cpt;
for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
if (strcmp(gfp_compact_table[i].original, str))
continue;
cpt = gfp_compact_table[i].compact;
new = realloc(new_flags, len + strlen(cpt) + 2);
if (new == NULL) {
free(new_flags);
free(orig_flags);
return NULL;
}
new_flags = new;
if (!len) {
strcpy(new_flags, cpt);
} else {
strcat(new_flags, "|");
strcat(new_flags, cpt);
len++;
}
len += strlen(cpt);
}
str = strtok_r(NULL, "|", &pos);
}
if (max_gfp_len < len)
max_gfp_len = len;
free(orig_flags);
return new_flags;
}
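/*
* Illustration only: expected behaviour of compact_gfp_flags() on a
* typical flag string, assuming the compaction table above (the input
* is an example, not captured from a real trace):
*
* compact_gfp_flags("GFP_KERNEL|__GFP_ZERO") -> "K|Z"
*/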
static char *compact_gfp_string(unsigned long gfp_flags)
{
struct gfp_flag key = {
.flags = gfp_flags,
};
struct gfp_flag *gfp;
gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
if (gfp)
return gfp->compact_str;
return NULL;
}
static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
unsigned int gfp_flags)
{
struct tep_record record = {
.cpu = sample->cpu,
.data = sample->raw_data,
.size = sample->raw_size,
};
struct trace_seq seq;
char *str, *pos = NULL;
if (nr_gfps) {
struct gfp_flag key = {
.flags = gfp_flags,
};
if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
return 0;
}
trace_seq_init(&seq);
tep_print_event(evsel->tp_format->tep,
&seq, &record, "%s", TEP_PRINT_INFO);
str = strtok_r(seq.buffer, " ", &pos);
while (str) {
if (!strncmp(str, "gfp_flags=", 10)) {
struct gfp_flag *new;
new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
if (new == NULL)
return -ENOMEM;
gfps = new;
new += nr_gfps++;
new->flags = gfp_flags;
new->human_readable = strdup(str + 10);
new->compact_str = compact_gfp_flags(str + 10);
if (!new->human_readable || !new->compact_str)
return -ENOMEM;
qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
}
str = strtok_r(NULL, " ", &pos);
}
trace_seq_destroy(&seq);
return 0;
}
static int evsel__process_page_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
u64 page;
unsigned int order = evsel__intval(evsel, sample, "order");
unsigned int gfp_flags = evsel__intval(evsel, sample, "gfp_flags");
unsigned int migrate_type = evsel__intval(evsel, sample,
"migratetype");
u64 bytes = kmem_page_size << order;
u64 callsite;
struct page_stat *pstat;
struct page_stat this = {
.order = order,
.gfp_flags = gfp_flags,
.migrate_type = migrate_type,
};
if (use_pfn)
page = evsel__intval(evsel, sample, "pfn");
else
page = evsel__intval(evsel, sample, "page");
nr_page_allocs++;
total_page_alloc_bytes += bytes;
if (!valid_page(page)) {
nr_page_fails++;
total_page_fail_bytes += bytes;
return 0;
}
if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
return -1;
callsite = find_callsite(evsel, sample);
/*
* Record this page so that, at the matching free event, it can be
* found together with its gfp flags and migrate type.
*/
this.page = page;
pstat = page_stat__findnew_page(&this);
if (pstat == NULL)
return -ENOMEM;
pstat->nr_alloc++;
pstat->alloc_bytes += bytes;
pstat->callsite = callsite;
if (!live_page) {
pstat = page_stat__findnew_alloc(&this);
if (pstat == NULL)
return -ENOMEM;
pstat->nr_alloc++;
pstat->alloc_bytes += bytes;
pstat->callsite = callsite;
}
this.callsite = callsite;
pstat = page_stat__findnew_caller(&this);
if (pstat == NULL)
return -ENOMEM;
pstat->nr_alloc++;
pstat->alloc_bytes += bytes;
order_stats[order][migrate_type]++;
return 0;
}
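/*
* Note on the bookkeeping above: each allocation is recorded in up to
* three trees -- page_live_tree keyed by page/pfn (so the matching free
* event can recover gfp flags, migrate type and callsite),
* page_alloc_tree keyed by the alloc sort keys (skipped in live mode),
* and page_caller_tree keyed by the callsite sort keys.
*/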
static int evsel__process_page_free_event(struct evsel *evsel, struct perf_sample *sample)
{
u64 page;
unsigned int order = evsel__intval(evsel, sample, "order");
u64 bytes = kmem_page_size << order;
struct page_stat *pstat;
struct page_stat this = {
.order = order,
};
if (use_pfn)
page = evsel__intval(evsel, sample, "pfn");
else
page = evsel__intval(evsel, sample, "page");
nr_page_frees++;
total_page_free_bytes += bytes;
this.page = page;
pstat = page_stat__find_page(&this);
if (pstat == NULL) {
pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
page, order);
nr_page_nomatch++;
total_page_nomatch_bytes += bytes;
return 0;
}
this.gfp_flags = pstat->gfp_flags;
this.migrate_type = pstat->migrate_type;
this.callsite = pstat->callsite;
rb_erase(&pstat->node, &page_live_tree);
free(pstat);
if (live_page) {
order_stats[this.order][this.migrate_type]--;
} else {
pstat = page_stat__find_alloc(&this);
if (pstat == NULL)
return -ENOMEM;
pstat->nr_free++;
pstat->free_bytes += bytes;
}
pstat = page_stat__find_caller(&this);
if (pstat == NULL)
return -ENOENT;
pstat->nr_free++;
pstat->free_bytes += bytes;
if (live_page) {
pstat->nr_alloc--;
pstat->alloc_bytes -= bytes;
if (pstat->nr_alloc == 0) {
rb_erase(&pstat->node, &page_caller_tree);
free(pstat);
}
}
return 0;
}
static bool perf_kmem__skip_sample(struct perf_sample *sample)
{
/* skip the sample if it is outside the requested time window */
if (perf_time__skip_sample(&ptime, sample->time))
return true;
return false;
}
typedef int (*tracepoint_handler)(struct evsel *evsel,
struct perf_sample *sample);
static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
int err = 0;
struct thread *thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
if (perf_kmem__skip_sample(sample)) {
thread__put(thread);
return 0;
}
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread__tid(thread));
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
err = f(evsel, sample);
}
thread__put(thread);
return err;
}
static struct perf_tool perf_kmem = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.namespaces = perf_event__process_namespaces,
.ordered_events = true,
};
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
if (n_alloc == 0)
return 0.0;
else
return 100.0 - (100.0 * n_req / n_alloc);
}
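/*
* Example: a 100-byte request served from a 128-byte slab object gives
* fragmentation(100, 128) = 100.0 - 100.0 * 100 / 128 = 21.875%, i.e.
* 28 of the 128 allocated bytes are wasted.
*/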
static void __print_slab_result(struct rb_root *root,
struct perf_session *session,
int n_lines, int is_caller)
{
struct rb_node *next;
struct machine *machine = &session->machines.host;
printf("%.105s\n", graph_dotted_line);
printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
printf("%.105s\n", graph_dotted_line);
next = rb_first(root);
while (next && n_lines--) {
struct alloc_stat *data = rb_entry(next, struct alloc_stat,
node);
struct symbol *sym = NULL;
struct map *map;
char buf[BUFSIZ];
u64 addr;
if (is_caller) {
addr = data->call_site;
if (!raw_ip)
sym = machine__find_kernel_symbol(machine, addr, &map);
} else
addr = data->ptr;
if (sym != NULL)
snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
addr - map__unmap_ip(map, sym->start));
else
snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
printf(" %-34s |", buf);
printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
(unsigned long long)data->bytes_alloc,
(unsigned long)data->bytes_alloc / data->hit,
(unsigned long long)data->bytes_req,
(unsigned long)data->bytes_req / data->hit,
(unsigned long)data->hit,
(unsigned long)data->pingpong,
fragmentation(data->bytes_req, data->bytes_alloc));
next = rb_next(next);
}
if (n_lines == -1)
printf(" ... | ... | ... | ... | ... | ... \n");
printf("%.105s\n", graph_dotted_line);
}
static const char * const migrate_type_str[] = {
"UNMOVABL",
"RECLAIM",
"MOVABLE",
"RESERVED",
"CMA/ISLT",
"UNKNOWN",
};
static void __print_page_alloc_result(struct perf_session *session, int n_lines)
{
struct rb_node *next = rb_first(&page_alloc_sorted);
struct machine *machine = &session->machines.host;
const char *format;
int gfp_len = max(strlen("GFP flags"), max_gfp_len);
printf("\n%.105s\n", graph_dotted_line);
printf(" %-16s | %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
gfp_len, "GFP flags");
printf("%.105s\n", graph_dotted_line);
if (use_pfn)
format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
else
format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
while (next && n_lines--) {
struct page_stat *data;
struct symbol *sym;
struct map *map;
char buf[32];
char *caller = buf;
data = rb_entry(next, struct page_stat, node);
sym = machine__find_kernel_symbol(machine, data->callsite, &map);
if (sym)
caller = sym->name;
else
scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
printf(format, (unsigned long long)data->page,
(unsigned long long)data->alloc_bytes / 1024,
data->nr_alloc, data->order,
migrate_type_str[data->migrate_type],
gfp_len, compact_gfp_string(data->gfp_flags), caller);
next = rb_next(next);
}
if (n_lines == -1) {
printf(" ... | ... | ... | ... | ... | %-*s | ...\n",
gfp_len, "...");
}
printf("%.105s\n", graph_dotted_line);
}
static void __print_page_caller_result(struct perf_session *session, int n_lines)
{
struct rb_node *next = rb_first(&page_caller_sorted);
struct machine *machine = &session->machines.host;
int gfp_len = max(strlen("GFP flags"), max_gfp_len);
printf("\n%.105s\n", graph_dotted_line);
printf(" %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
live_page ? "Live" : "Total", gfp_len, "GFP flags");
printf("%.105s\n", graph_dotted_line);
while (next && n_lines--) {
struct page_stat *data;
struct symbol *sym;
struct map *map;
char buf[32];
char *caller = buf;
data = rb_entry(next, struct page_stat, node);
sym = machine__find_kernel_symbol(machine, data->callsite, &map);
if (sym)
caller = sym->name;
else
scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
(unsigned long long)data->alloc_bytes / 1024,
data->nr_alloc, data->order,
migrate_type_str[data->migrate_type],
gfp_len, compact_gfp_string(data->gfp_flags), caller);
next = rb_next(next);
}
if (n_lines == -1) {
printf(" ... | ... | ... | ... | %-*s | ...\n",
gfp_len, "...");
}
printf("%.105s\n", graph_dotted_line);
}
static void print_gfp_flags(void)
{
int i;
printf("#\n");
printf("# GFP flags\n");
printf("# ---------\n");
for (i = 0; i < nr_gfps; i++) {
printf("# %08x: %*s: %s\n", gfps[i].flags,
(int) max_gfp_len, gfps[i].compact_str,
gfps[i].human_readable);
}
}
static void print_slab_summary(void)
{
printf("\nSUMMARY (SLAB allocator)");
printf("\n========================\n");
printf("Total bytes requested: %'lu\n", total_requested);
printf("Total bytes allocated: %'lu\n", total_allocated);
printf("Total bytes freed: %'lu\n", total_freed);
if (total_allocated > total_freed) {
printf("Net total bytes allocated: %'lu\n",
total_allocated - total_freed);
}
printf("Total bytes wasted on internal fragmentation: %'lu\n",
total_allocated - total_requested);
printf("Internal fragmentation: %f%%\n",
fragmentation(total_requested, total_allocated));
printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
}
static void print_page_summary(void)
{
int o, m;
u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;
printf("\nSUMMARY (page allocator)");
printf("\n========================\n");
printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests",
nr_page_allocs, total_page_alloc_bytes / 1024);
printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests",
nr_page_frees, total_page_free_bytes / 1024);
printf("\n");
printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
nr_page_allocs - nr_alloc_freed,
(total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
nr_page_nomatch, total_page_nomatch_bytes / 1024);
printf("\n");
printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures",
nr_page_fails, total_page_fail_bytes / 1024);
printf("\n");
printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable",
"Reclaimable", "Movable", "Reserved", "CMA/Isolated");
printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line,
graph_dotted_line, graph_dotted_line, graph_dotted_line,
graph_dotted_line, graph_dotted_line);
for (o = 0; o < MAX_PAGE_ORDER; o++) {
printf("%5d", o);
for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
if (order_stats[o][m])
printf(" %'12d", order_stats[o][m]);
else
printf(" %12c", '.');
}
printf("\n");
}
}
static void print_slab_result(struct perf_session *session)
{
if (caller_flag)
__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
if (alloc_flag)
__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
print_slab_summary();
}
static void print_page_result(struct perf_session *session)
{
if (caller_flag || alloc_flag)
print_gfp_flags();
if (caller_flag)
__print_page_caller_result(session, caller_lines);
if (alloc_flag)
__print_page_alloc_result(session, alloc_lines);
print_page_summary();
}
static void print_result(struct perf_session *session)
{
if (kmem_slab)
print_slab_result(session);
if (kmem_page)
print_page_result(session);
}
static LIST_HEAD(slab_caller_sort);
static LIST_HEAD(slab_alloc_sort);
static LIST_HEAD(page_caller_sort);
static LIST_HEAD(page_alloc_sort);
static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
struct list_head *sort_list)
{
struct rb_node **new = &(root->rb_node);
struct rb_node *parent = NULL;
struct sort_dimension *sort;
while (*new) {
struct alloc_stat *this;
int cmp = 0;
this = rb_entry(*new, struct alloc_stat, node);
parent = *new;
list_for_each_entry(sort, sort_list, list) {
cmp = sort->cmp(data, this);
if (cmp)
break;
}
if (cmp > 0)
new = &((*new)->rb_left);
else
new = &((*new)->rb_right);
}
rb_link_node(&data->node, parent, new);
rb_insert_color(&data->node, root);
}
static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
struct list_head *sort_list)
{
struct rb_node *node;
struct alloc_stat *data;
for (;;) {
node = rb_first(root);
if (!node)
break;
rb_erase(node, root);
data = rb_entry(node, struct alloc_stat, node);
sort_slab_insert(root_sorted, data, sort_list);
}
}
static void sort_page_insert(struct rb_root *root, struct page_stat *data,
struct list_head *sort_list)
{
struct rb_node **new = &root->rb_node;
struct rb_node *parent = NULL;
struct sort_dimension *sort;
while (*new) {
struct page_stat *this;
int cmp = 0;
this = rb_entry(*new, struct page_stat, node);
parent = *new;
list_for_each_entry(sort, sort_list, list) {
cmp = sort->cmp(data, this);
if (cmp)
break;
}
if (cmp > 0)
new = &parent->rb_left;
else
new = &parent->rb_right;
}
rb_link_node(&data->node, parent, new);
rb_insert_color(&data->node, root);
}
static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
struct list_head *sort_list)
{
struct rb_node *node;
struct page_stat *data;
for (;;) {
node = rb_first(root);
if (!node)
break;
rb_erase(node, root);
data = rb_entry(node, struct page_stat, node);
sort_page_insert(root_sorted, data, sort_list);
}
}
static void sort_result(void)
{
if (kmem_slab) {
__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
&slab_alloc_sort);
__sort_slab_result(&root_caller_stat, &root_caller_sorted,
&slab_caller_sort);
}
if (kmem_page) {
if (live_page)
__sort_page_result(&page_live_tree, &page_alloc_sorted,
&page_alloc_sort);
else
__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
&page_alloc_sort);
__sort_page_result(&page_caller_tree, &page_caller_sorted,
&page_caller_sort);
}
}
static int __cmd_kmem(struct perf_session *session)
{
int err = -EINVAL;
struct evsel *evsel;
const struct evsel_str_handler kmem_tracepoints[] = {
/* slab allocator */
{ "kmem:kmalloc", evsel__process_alloc_event, },
{ "kmem:kmem_cache_alloc", evsel__process_alloc_event, },
{ "kmem:kmalloc_node", evsel__process_alloc_event, },
{ "kmem:kmem_cache_alloc_node", evsel__process_alloc_event, },
{ "kmem:kfree", evsel__process_free_event, },
{ "kmem:kmem_cache_free", evsel__process_free_event, },
/* page allocator */
{ "kmem:mm_page_alloc", evsel__process_page_alloc_event, },
{ "kmem:mm_page_free", evsel__process_page_free_event, },
};
if (!perf_session__has_traces(session, "kmem record"))
goto out;
if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
pr_err("Initializing perf session tracepoint handlers failed\n");
goto out;
}
evlist__for_each_entry(session->evlist, evsel) {
if (!strcmp(evsel__name(evsel), "kmem:mm_page_alloc") &&
evsel__field(evsel, "pfn")) {
use_pfn = true;
break;
}
}
setup_pager();
err = perf_session__process_events(session);
if (err != 0) {
pr_err("error during process events: %d\n", err);
goto out;
}
sort_result();
print_result(session);
out:
return err;
}
/* slab sort keys */
static int ptr_cmp(void *a, void *b)
{
struct alloc_stat *l = a;
struct alloc_stat *r = b;
if (l->ptr < r->ptr)
return -1;
else if (l->ptr > r->ptr)
return 1;
return 0;
}
static struct sort_dimension ptr_sort_dimension = {
.name = "ptr",
.cmp = ptr_cmp,
};
static int slab_callsite_cmp(void *a, void *b)
{
struct alloc_stat *l = a;
struct alloc_stat *r = b;
if (l->call_site < r->call_site)
return -1;
else if (l->call_site > r->call_site)
return 1;
return 0;
}
static struct sort_dimension callsite_sort_dimension = {
.name = "callsite",
.cmp = slab_callsite_cmp,
};
static int hit_cmp(void *a, void *b)
{
struct alloc_stat *l = a;
struct alloc_stat *r = b;
if (l->hit < r->hit)
return -1;
else if (l->hit > r->hit)
return 1;
return 0;
}
static struct sort_dimension hit_sort_dimension = {
.name = "hit",
.cmp = hit_cmp,
};
static int bytes_cmp(void *a, void *b)
{
struct alloc_stat *l = a;
struct alloc_stat *r = b;
if (l->bytes_alloc < r->bytes_alloc)
return -1;
else if (l->bytes_alloc > r->bytes_alloc)
return 1;
return 0;
}
static struct sort_dimension bytes_sort_dimension = {
.name = "bytes",
.cmp = bytes_cmp,
};
static int frag_cmp(void *a, void *b)
{
double x, y;
struct alloc_stat *l = a;
struct alloc_stat *r = b;
x = fragmentation(l->bytes_req, l->bytes_alloc);
y = fragmentation(r->bytes_req, r->bytes_alloc);
if (x < y)
return -1;
else if (x > y)
return 1;
return 0;
}
static struct sort_dimension frag_sort_dimension = {
.name = "frag",
.cmp = frag_cmp,
};
static int pingpong_cmp(void *a, void *b)
{
struct alloc_stat *l = a;
struct alloc_stat *r = b;
if (l->pingpong < r->pingpong)
return -1;
else if (l->pingpong > r->pingpong)
return 1;
return 0;
}
static struct sort_dimension pingpong_sort_dimension = {
.name = "pingpong",
.cmp = pingpong_cmp,
};
/* page sort keys */
static int page_cmp(void *a, void *b)
{
struct page_stat *l = a;
struct page_stat *r = b;
if (l->page < r->page)
return -1;
else if (l->page > r->page)
return 1;
return 0;
}
static struct sort_dimension page_sort_dimension = {
.name = "page",
.cmp = page_cmp,
};
static int page_callsite_cmp(void *a, void *b)
{
struct page_stat *l = a;
struct page_stat *r = b;
if (l->callsite < r->callsite)
return -1;
else if (l->callsite > r->callsite)
return 1;
return 0;
}
static struct sort_dimension page_callsite_sort_dimension = {
.name = "callsite",
.cmp = page_callsite_cmp,
};
static int page_hit_cmp(void *a, void *b)
{
struct page_stat *l = a;
struct page_stat *r = b;
if (l->nr_alloc < r->nr_alloc)
return -1;
else if (l->nr_alloc > r->nr_alloc)
return 1;
return 0;
}
static struct sort_dimension page_hit_sort_dimension = {
.name = "hit",
.cmp = page_hit_cmp,
};
static int page_bytes_cmp(void *a, void *b)
{
struct page_stat *l = a;
struct page_stat *r = b;
if (l->alloc_bytes < r->alloc_bytes)
return -1;
else if (l->alloc_bytes > r->alloc_bytes)
return 1;
return 0;
}
static struct sort_dimension page_bytes_sort_dimension = {
.name = "bytes",
.cmp = page_bytes_cmp,
};
static int page_order_cmp(void *a, void *b)
{
struct page_stat *l = a;
struct page_stat *r = b;
if (l->order < r->order)
return -1;
else if (l->order > r->order)
return 1;
return 0;
}
static struct sort_dimension page_order_sort_dimension = {
.name = "order",
.cmp = page_order_cmp,
};
static int migrate_type_cmp(void *a, void *b)
{
struct page_stat *l = a;
struct page_stat *r = b;
/* for internal use to find a freed page */
if (l->migrate_type == -1U)
return 0;
if (l->migrate_type < r->migrate_type)
return -1;
else if (l->migrate_type > r->migrate_type)
return 1;
return 0;
}
static struct sort_dimension migrate_type_sort_dimension = {
.name = "migtype",
.cmp = migrate_type_cmp,
};
static int gfp_flags_cmp(void *a, void *b)
{
struct page_stat *l = a;
struct page_stat *r = b;
/* for internal use to find a freed page */
if (l->gfp_flags == -1U)
return 0;
if (l->gfp_flags < r->gfp_flags)
return -1;
else if (l->gfp_flags > r->gfp_flags)
return 1;
return 0;
}
static struct sort_dimension gfp_flags_sort_dimension = {
.name = "gfp",
.cmp = gfp_flags_cmp,
};
static struct sort_dimension *slab_sorts[] = {
&ptr_sort_dimension,
&callsite_sort_dimension,
&hit_sort_dimension,
&bytes_sort_dimension,
&frag_sort_dimension,
&pingpong_sort_dimension,
};
static struct sort_dimension *page_sorts[] = {
&page_sort_dimension,
&page_callsite_sort_dimension,
&page_hit_sort_dimension,
&page_bytes_sort_dimension,
&page_order_sort_dimension,
&migrate_type_sort_dimension,
&gfp_flags_sort_dimension,
};
static int slab_sort_dimension__add(const char *tok, struct list_head *list)
{
struct sort_dimension *sort;
int i;
for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
if (!strcmp(slab_sorts[i]->name, tok)) {
sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
if (!sort) {
pr_err("%s: memdup failed\n", __func__);
return -1;
}
list_add_tail(&sort->list, list);
return 0;
}
}
return -1;
}
static int page_sort_dimension__add(const char *tok, struct list_head *list)
{
struct sort_dimension *sort;
int i;
for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
if (!strcmp(page_sorts[i]->name, tok)) {
sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
if (!sort) {
pr_err("%s: memdup failed\n", __func__);
return -1;
}
list_add_tail(&sort->list, list);
return 0;
}
}
return -1;
}
static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
{
char *tok;
char *str = strdup(arg);
char *pos = str;
if (!str) {
pr_err("%s: strdup failed\n", __func__);
return -1;
}
while (true) {
tok = strsep(&pos, ",");
if (!tok)
break;
if (slab_sort_dimension__add(tok, sort_list) < 0) {
pr_err("Unknown slab --sort key: '%s'", tok);
free(str);
return -1;
}
}
free(str);
return 0;
}
static int setup_page_sorting(struct list_head *sort_list, const char *arg)
{
char *tok;
char *str = strdup(arg);
char *pos = str;
if (!str) {
pr_err("%s: strdup failed\n", __func__);
return -1;
}
while (true) {
tok = strsep(&pos, ",");
if (!tok)
break;
if (page_sort_dimension__add(tok, sort_list) < 0) {
pr_err("Unknown page --sort key: '%s'", tok);
free(str);
return -1;
}
}
free(str);
return 0;
}
static int parse_sort_opt(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
{
if (!arg)
return -1;
if (kmem_page > kmem_slab ||
(kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
if (caller_flag > alloc_flag)
return setup_page_sorting(&page_caller_sort, arg);
else
return setup_page_sorting(&page_alloc_sort, arg);
} else {
if (caller_flag > alloc_flag)
return setup_slab_sorting(&slab_caller_sort, arg);
else
return setup_slab_sorting(&slab_alloc_sort, arg);
}
return 0;
}
static int parse_caller_opt(const struct option *opt __maybe_unused,
const char *arg __maybe_unused,
int unset __maybe_unused)
{
caller_flag = (alloc_flag + 1);
return 0;
}
static int parse_alloc_opt(const struct option *opt __maybe_unused,
const char *arg __maybe_unused,
int unset __maybe_unused)
{
alloc_flag = (caller_flag + 1);
return 0;
}
static int parse_slab_opt(const struct option *opt __maybe_unused,
const char *arg __maybe_unused,
int unset __maybe_unused)
{
kmem_slab = (kmem_page + 1);
return 0;
}
static int parse_page_opt(const struct option *opt __maybe_unused,
const char *arg __maybe_unused,
int unset __maybe_unused)
{
kmem_page = (kmem_slab + 1);
return 0;
}
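/*
* Note on the flag arithmetic in the four parsers above: each option
* sets its flag to the other flag of the pair plus one, so whichever
* option appears later on the command line ends up with the larger
* value. Comparisons such as "caller_flag > alloc_flag" therefore pick
* the most recently given option, e.g. "--alloc --caller --line 10"
* applies the line limit to the caller view.
*/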
static int parse_line_opt(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
{
int lines;
if (!arg)
return -1;
lines = strtoul(arg, NULL, 10);
if (caller_flag > alloc_flag)
caller_lines = lines;
else
alloc_lines = lines;
return 0;
}
static bool slab_legacy_tp_is_exposed(void)
{
/*
* The tracepoints "kmem:kmalloc_node" and
* "kmem:kmem_cache_alloc_node" have been removed in recent
* kernels. If the tracepoint "kmem:kmalloc_node" exists, the
* tool is running on an old kernel and must fall back to
* supporting these legacy tracepoints.
*/
return !IS_ERR(trace_event__tp_format("kmem", "kmalloc_node"));
}
static int __cmd_record(int argc, const char **argv)
{
const char * const record_args[] = {
"record", "-a", "-R", "-c", "1",
};
const char * const slab_events[] = {
"-e", "kmem:kmalloc",
"-e", "kmem:kfree",
"-e", "kmem:kmem_cache_alloc",
"-e", "kmem:kmem_cache_free",
};
const char * const slab_legacy_events[] = {
"-e", "kmem:kmalloc_node",
"-e", "kmem:kmem_cache_alloc_node",
};
const char * const page_events[] = {
"-e", "kmem:mm_page_alloc",
"-e", "kmem:mm_page_free",
};
unsigned int rec_argc, i, j;
const char **rec_argv;
unsigned int slab_legacy_tp_exposed = slab_legacy_tp_is_exposed();
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
if (kmem_slab) {
rec_argc += ARRAY_SIZE(slab_events);
if (slab_legacy_tp_exposed)
rec_argc += ARRAY_SIZE(slab_legacy_events);
}
if (kmem_page)
rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
if (kmem_slab) {
for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
rec_argv[i] = strdup(slab_events[j]);
if (slab_legacy_tp_exposed) {
for (j = 0; j < ARRAY_SIZE(slab_legacy_events); j++, i++)
rec_argv[i] = strdup(slab_legacy_events[j]);
}
}
if (kmem_page) {
rec_argv[i++] = strdup("-g");
for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
rec_argv[i] = strdup(page_events[j]);
}
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
return cmd_record(i, rec_argv);
}
static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
{
if (!strcmp(var, "kmem.default")) {
if (!strcmp(value, "slab"))
kmem_default = KMEM_SLAB;
else if (!strcmp(value, "page"))
kmem_default = KMEM_PAGE;
else
pr_err("invalid default value ('slab' or 'page' required): %s\n",
value);
return 0;
}
return 0;
}
int cmd_kmem(int argc, const char **argv)
{
const char * const default_slab_sort = "frag,hit,bytes";
const char * const default_page_sort = "bytes,hit";
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
const struct option kmem_options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
"show per-callsite statistics", parse_caller_opt),
OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
"show per-allocation statistics", parse_alloc_opt),
OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
"sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
"page, order, migtype, gfp", parse_sort_opt),
OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
parse_slab_opt),
OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
parse_page_opt),
OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
OPT_STRING(0, "time", &time_str, "str",
"Time span of interest (start,stop)"),
OPT_END()
};
const char *const kmem_subcommands[] = { "record", "stat", NULL };
const char *kmem_usage[] = {
NULL,
NULL
};
struct perf_session *session;
static const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
int ret = perf_config(kmem_config, NULL);
if (ret)
return ret;
argc = parse_options_subcommand(argc, argv, kmem_options,
kmem_subcommands, kmem_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(kmem_usage, kmem_options);
if (kmem_slab == 0 && kmem_page == 0) {
if (kmem_default == KMEM_SLAB)
kmem_slab = 1;
else
kmem_page = 1;
}
if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
symbol__init(NULL);
return __cmd_record(argc, argv);
}
data.path = input_name;
kmem_session = session = perf_session__new(&data, &perf_kmem);
if (IS_ERR(session))
return PTR_ERR(session);
ret = -1;
if (kmem_slab) {
if (!evlist__find_tracepoint_by_name(session->evlist, "kmem:kmalloc")) {
pr_err(errmsg, "slab", "slab");
goto out_delete;
}
}
if (kmem_page) {
struct evsel *evsel = evlist__find_tracepoint_by_name(session->evlist, "kmem:mm_page_alloc");
if (evsel == NULL) {
pr_err(errmsg, "page", "page");
goto out_delete;
}
kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
symbol_conf.use_callchain = true;
}
symbol__init(&session->header.env);
if (perf_time__parse_str(&ptime, time_str) != 0) {
pr_err("Invalid time string\n");
ret = -EINVAL;
goto out_delete;
}
if (!strcmp(argv[0], "stat")) {
setlocale(LC_ALL, "");
if (cpu__setup_cpunode_map())
goto out_delete;
if (list_empty(&slab_caller_sort))
setup_slab_sorting(&slab_caller_sort, default_slab_sort);
if (list_empty(&slab_alloc_sort))
setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
if (list_empty(&page_caller_sort))
setup_page_sorting(&page_caller_sort, default_page_sort);
if (list_empty(&page_alloc_sort))
setup_page_sorting(&page_alloc_sort, default_page_sort);
if (kmem_page) {
setup_page_sorting(&page_alloc_sort_input,
"page,order,migtype,gfp");
setup_page_sorting(&page_caller_sort_input,
"callsite,order,migtype,gfp");
}
ret = __cmd_kmem(session);
} else
usage_with_options(kmem_usage, kmem_options);
out_delete:
perf_session__delete(session);
return ret;
}
| linux-master | tools/perf/builtin-kmem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-annotate.c
*
* Builtin annotate command: Analyze the perf.data input file,
* look up and read DSOs and symbol information, and display
* a histogram of results sorted along various keys.
*/
#include "builtin.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include "util/symbol.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/annotate.h"
#include "util/event.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/sort.h"
#include "util/hist.h"
#include "util/dso.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
#include "arch/common.h"
#include "util/block-range.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/util.h"
#include <dlfcn.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/err.h>
struct perf_annotate {
struct perf_tool tool;
struct perf_session *session;
struct annotation_options opts;
#ifdef HAVE_SLANG_SUPPORT
bool use_tui;
#endif
bool use_stdio, use_stdio2;
#ifdef HAVE_GTK2_SUPPORT
bool use_gtk;
#endif
bool skip_missing;
bool has_br_stack;
bool group_set;
float min_percent;
const char *sym_hist_filter;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
/*
* Given one basic block:
*
* from to branch_i
* * ----> *
* |
* | block
* v
* * ----> *
* from to branch_i+1
*
* where the horizontal are the branches and the vertical is the executed
* block of instructions.
*
* We count, for each 'instruction', the number of blocks that covered it, as
* well as how often each branch is taken.
*
* We can do this without knowing the actual instruction stream by keeping
* track of the address ranges. We break down ranges such that there is no
* overlap and iterate from the start until the end.
*
* @acme: once we parse the objdump output _before_ processing the samples,
* we can easily fold the branch.cycles IPC bits in.
*/
static void process_basic_block(struct addr_map_symbol *start,
struct addr_map_symbol *end,
struct branch_flags *flags)
{
struct symbol *sym = start->ms.sym;
struct annotation *notes = sym ? symbol__annotation(sym) : NULL;
struct block_range_iter iter;
struct block_range *entry;
/*
* Sanity; NULL isn't executable and the CPU cannot execute backwards
*/
if (!start->addr || start->addr > end->addr)
return;
iter = block_range__create(start->addr, end->addr);
if (!block_range_iter__valid(&iter))
return;
/*
* First block in range is a branch target.
*/
entry = block_range_iter(&iter);
assert(entry->is_target);
entry->entry++;
do {
entry = block_range_iter(&iter);
entry->coverage++;
entry->sym = sym;
if (notes)
notes->max_coverage = max(notes->max_coverage, entry->coverage);
} while (block_range_iter__next(&iter));
/*
* Last block in range is a branch.
*/
entry = block_range_iter(&iter);
assert(entry->is_branch);
entry->taken++;
if (flags->predicted)
entry->pred++;
}
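/*
* Illustration only: a worked example of the counting above. Suppose
* two branch records yield the basic blocks [0x10, 0x20] and
* [0x18, 0x28]. block_range__create() splits the address space into
* non-overlapping ranges roughly [0x10, 0x17], [0x18, 0x20] and
* [0x21, 0x28]: the middle range is covered twice (coverage == 2),
* 0x10 and 0x18 are branch targets (entry++), and the blocks ending at
* 0x20 and 0x28 are branches (taken++). Exact boundary handling
* depends on block_range__create(), so treat the split points as a
* sketch.
*/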
static void process_branch_stack(struct branch_stack *bs, struct addr_location *al,
struct perf_sample *sample)
{
struct addr_map_symbol *prev = NULL;
struct branch_info *bi;
int i;
if (!bs || !bs->nr)
return;
bi = sample__resolve_bstack(sample, al);
if (!bi)
return;
for (i = bs->nr - 1; i >= 0; i--) {
/*
* XXX filter against symbol
*/
if (prev)
process_basic_block(prev, &bi[i].from, &bi[i].flags);
prev = &bi[i].to;
}
free(bi);
}
static int hist_iter__branch_callback(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused,
bool single __maybe_unused,
void *arg __maybe_unused)
{
struct hist_entry *he = iter->he;
struct branch_info *bi;
struct perf_sample *sample = iter->sample;
struct evsel *evsel = iter->evsel;
int err;
bi = he->branch_info;
err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
if (err)
goto out;
err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
out:
return err;
}
static int process_branch_callback(struct evsel *evsel,
struct perf_sample *sample,
struct addr_location *al,
struct perf_annotate *ann,
struct machine *machine)
{
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = sample,
.add_entry_cb = hist_iter__branch_callback,
.hide_unresolved = symbol_conf.hide_unresolved,
.ops = &hist_iter_branch,
};
struct addr_location a;
int ret;
addr_location__init(&a);
if (machine__resolve(machine, &a, sample) < 0) {
ret = -1;
goto out;
}
if (a.sym == NULL) {
ret = 0;
goto out;
}
if (a.map != NULL)
map__dso(a.map)->hit = 1;
hist__account_cycles(sample->branch_stack, al, sample, false, NULL);
ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
out:
addr_location__exit(&a);
return ret;
}
static bool has_annotation(struct perf_annotate *ann)
{
return ui__has_annotation() || ann->use_stdio2;
}
static int evsel__add_sample(struct evsel *evsel, struct perf_sample *sample,
struct addr_location *al, struct perf_annotate *ann,
struct machine *machine)
{
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
int ret;
if ((!ann->has_br_stack || !has_annotation(ann)) &&
ann->sym_hist_filter != NULL &&
(al->sym == NULL ||
strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
/* We're only interested in a symbol named sym_hist_filter */
/*
* FIXME: why isn't this done in the symbol_filter when loading
* the DSO?
*/
if (al->sym != NULL) {
struct dso *dso = map__dso(al->map);
rb_erase_cached(&al->sym->rb_node, &dso->symbols);
symbol__delete(al->sym);
dso__reset_find_symbol_cache(dso);
}
return 0;
}
/*
* XXX filtered samples can still have branch entries pointing into our
* symbol, and those are missed.
*/
process_branch_stack(sample->branch_stack, al, sample);
if (ann->has_br_stack && has_annotation(ann))
return process_branch_callback(evsel, sample, al, ann, machine);
he = hists__add_entry(hists, al, NULL, NULL, NULL, NULL, sample, true);
if (he == NULL)
return -ENOMEM;
ret = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
hists__inc_nr_samples(hists, true);
return ret;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
struct addr_location al;
int ret = 0;
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
ret = -1;
goto out_put;
}
if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
goto out_put;
if (!al.filtered &&
evsel__add_sample(evsel, sample, &al, ann, machine)) {
pr_warning("problem incrementing symbol count, "
"skipping event\n");
ret = -1;
}
out_put:
addr_location__exit(&al);
return ret;
}
static int process_feature_event(struct perf_session *session,
union perf_event *event)
{
if (event->feat.feat_id < HEADER_LAST_FEATURE)
return perf_event__process_feature(session, event);
return 0;
}
static int hist_entry__tty_annotate(struct hist_entry *he,
struct evsel *evsel,
struct perf_annotate *ann)
{
if (!ann->use_stdio2)
return symbol__tty_annotate(&he->ms, evsel, &ann->opts);
return symbol__tty_annotate2(&he->ms, evsel, &ann->opts);
}
static void hists__find_annotations(struct hists *hists,
struct evsel *evsel,
struct perf_annotate *ann)
{
struct rb_node *nd = rb_first_cached(&hists->entries), *next;
int key = K_RIGHT;
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
struct annotation *notes;
if (he->ms.sym == NULL || map__dso(he->ms.map)->annotate_warned)
goto find_next;
if (ann->sym_hist_filter &&
(strcmp(he->ms.sym->name, ann->sym_hist_filter) != 0))
goto find_next;
if (ann->min_percent) {
float percent = 0;
u64 total = hists__total_period(hists);
if (total)
percent = 100.0 * he->stat.period / total;
if (percent < ann->min_percent)
goto find_next;
}
notes = symbol__annotation(he->ms.sym);
if (notes->src == NULL) {
find_next:
if (key == K_LEFT || key == '<')
nd = rb_prev(nd);
else
nd = rb_next(nd);
continue;
}
if (use_browser == 2) {
int ret;
int (*annotate)(struct hist_entry *he,
struct evsel *evsel,
struct annotation_options *options,
struct hist_browser_timer *hbt);
annotate = dlsym(perf_gtk_handle,
"hist_entry__gtk_annotate");
if (annotate == NULL) {
ui__error("GTK browser not found!\n");
return;
}
ret = annotate(he, evsel, &ann->opts, NULL);
if (!ret || !ann->skip_missing)
return;
/* skip missing symbols */
nd = rb_next(nd);
} else if (use_browser == 1) {
key = hist_entry__tui_annotate(he, evsel, NULL, &ann->opts);
switch (key) {
case -1:
if (!ann->skip_missing)
return;
/* fall through */
case K_RIGHT:
case '>':
next = rb_next(nd);
break;
case K_LEFT:
case '<':
next = rb_prev(nd);
break;
default:
return;
}
if (next != NULL)
nd = next;
} else {
hist_entry__tty_annotate(he, evsel, ann);
nd = rb_next(nd);
}
}
}
static int __cmd_annotate(struct perf_annotate *ann)
{
int ret;
struct perf_session *session = ann->session;
struct evsel *pos;
u64 total_nr_samples;
if (ann->cpu_list) {
ret = perf_session__cpu_bitmap(session, ann->cpu_list,
ann->cpu_bitmap);
if (ret)
goto out;
}
if (!ann->opts.objdump_path) {
ret = perf_env__lookup_objdump(&session->header.env,
&ann->opts.objdump_path);
if (ret)
goto out;
}
ret = perf_session__process_events(session);
if (ret)
goto out;
if (dump_trace) {
perf_session__fprintf_nr_events(session, stdout, false);
evlist__fprintf_nr_events(session->evlist, stdout, false);
goto out;
}
if (verbose > 3)
perf_session__fprintf(session, stdout);
if (verbose > 2)
perf_session__fprintf_dsos(session, stdout);
total_nr_samples = 0;
evlist__for_each_entry(session->evlist, pos) {
struct hists *hists = evsel__hists(pos);
u32 nr_samples = hists->stats.nr_samples;
if (nr_samples > 0) {
total_nr_samples += nr_samples;
hists__collapse_resort(hists, NULL);
/* Don't sort callchain */
evsel__reset_sample_bit(pos, CALLCHAIN);
evsel__output_resort(pos, NULL);
if (symbol_conf.event_group && !evsel__is_group_leader(pos))
continue;
hists__find_annotations(hists, pos, ann);
}
}
if (total_nr_samples == 0) {
ui__error("The %s data has no samples!\n", session->data->path);
goto out;
}
if (use_browser == 2) {
void (*show_annotations)(void);
show_annotations = dlsym(perf_gtk_handle,
"perf_gtk__show_annotations");
if (show_annotations == NULL) {
ui__error("GTK browser not found!\n");
goto out;
}
show_annotations();
}
out:
return ret;
}
static int parse_percent_limit(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct perf_annotate *ann = opt->value;
double pcnt = strtof(str, NULL);
ann->min_percent = pcnt;
return 0;
}
static const char * const annotate_usage[] = {
"perf annotate [<options>]",
NULL
};
int cmd_annotate(int argc, const char **argv)
{
struct perf_annotate annotate = {
.tool = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.comm = perf_event__process_comm,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.namespaces = perf_event__process_namespaces,
.attr = perf_event__process_attr,
.build_id = perf_event__process_build_id,
#ifdef HAVE_LIBTRACEEVENT
.tracing_data = perf_event__process_tracing_data,
#endif
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.feature = process_feature_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
struct itrace_synth_opts itrace_synth_opts = {
.set = 0,
};
const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol",
"symbol to annotate"),
OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "do now show any warnings or messages"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
#ifdef HAVE_GTK2_SUPPORT
OPT_BOOLEAN(0, "gtk", &annotate.use_gtk, "Use the GTK interface"),
#endif
#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
#endif
OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"),
OPT_BOOLEAN(0, "stdio2", &annotate.use_stdio2, "Use the stdio interface"),
OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
"don't load vmlinux even if found"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('l', "print-line", &annotate.opts.print_lines,
"print matching source lines (may be slow)"),
OPT_BOOLEAN('P', "full-paths", &annotate.opts.full_path,
"Don't shorten the displayed pathnames"),
OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
"Skip symbols that cannot be annotated"),
OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group,
&annotate.group_set,
"Show event group information together"),
OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
OPT_CALLBACK(0, "symfs", NULL, "directory",
"Look for files with symbols relative to this directory",
symbol__config_symfs),
OPT_BOOLEAN(0, "source", &annotate.opts.annotate_src,
"Interleave source code with assembly code (default)"),
OPT_BOOLEAN(0, "asm-raw", &annotate.opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING(0, "prefix", &annotate.opts.prefix, "prefix",
"Add prefix to source file path names in programs (with --prefix-strip)"),
OPT_STRING(0, "prefix-strip", &annotate.opts.prefix_strip, "N",
"Strip first N entries of source file path name in programs (with --prefix)"),
OPT_STRING(0, "objdump", &objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_STRING(0, "addr2line", &addr2line_path, "path",
"addr2line binary to use for line numbers"),
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
"Enable symbol demangling"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
"Show event group information together"),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
"'always' (default), 'never' or 'auto' only applicable to --stdio mode",
stdio__config_color, "always"),
OPT_CALLBACK(0, "percent-type", &annotate.opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
OPT_CALLBACK(0, "percent-limit", &annotate, "percent",
"Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
"Instruction Tracing options\n" ITRACE_HELP,
itrace_parse_synth_opts),
OPT_END()
};
int ret;
set_option_flag(options, 0, "show-total-period", PARSE_OPT_EXCLUSIVE);
set_option_flag(options, 0, "show-nr-samples", PARSE_OPT_EXCLUSIVE);
annotation_options__init(&annotate.opts);
ret = hists__init();
if (ret < 0)
return ret;
annotation_config__init(&annotate.opts);
argc = parse_options(argc, argv, options, annotate_usage, 0);
if (argc) {
/*
* Special case: if there's an argument left then assume that
* it's a symbol filter:
*/
if (argc > 1)
usage_with_options(annotate_usage, options);
annotate.sym_hist_filter = argv[0];
}
if (disassembler_style) {
annotate.opts.disassembler_style = strdup(disassembler_style);
if (!annotate.opts.disassembler_style)
return -ENOMEM;
}
if (objdump_path) {
annotate.opts.objdump_path = strdup(objdump_path);
if (!annotate.opts.objdump_path)
return -ENOMEM;
}
if (addr2line_path) {
symbol_conf.addr2line_path = strdup(addr2line_path);
if (!symbol_conf.addr2line_path)
return -ENOMEM;
}
if (annotate_check_args(&annotate.opts) < 0)
return -EINVAL;
#ifdef HAVE_GTK2_SUPPORT
if (symbol_conf.show_nr_samples && annotate.use_gtk) {
pr_err("--show-nr-samples is not available in --gtk mode at this time\n");
return -EINVAL;
}
#endif
ret = symbol__validate_sym_arguments();
if (ret)
return ret;
if (quiet)
perf_quiet_option();
data.path = input_name;
annotate.session = perf_session__new(&data, &annotate.tool);
if (IS_ERR(annotate.session))
return PTR_ERR(annotate.session);
annotate.session->itrace_synth_opts = &itrace_synth_opts;
annotate.has_br_stack = perf_header__has_feat(&annotate.session->header,
HEADER_BRANCH_STACK);
if (annotate.group_set)
evlist__force_leader(annotate.session->evlist);
ret = symbol__annotation_init();
if (ret < 0)
goto out_delete;
symbol_conf.try_vmlinux_path = true;
ret = symbol__init(&annotate.session->header.env);
if (ret < 0)
goto out_delete;
if (annotate.use_stdio || annotate.use_stdio2)
use_browser = 0;
#ifdef HAVE_SLANG_SUPPORT
else if (annotate.use_tui)
use_browser = 1;
#endif
#ifdef HAVE_GTK2_SUPPORT
else if (annotate.use_gtk)
use_browser = 2;
#endif
setup_browser(true);
/*
* Events from different processes may correspond to the same
* symbol. annotate does not care about processes, so set the
* sort order to avoid repeated output.
*/
sort_order = "dso,symbol";
/*
* Set SORT_MODE__BRANCH so that annotate displays IPC/cycles
* when branch info is present in the perf data, in TUI mode.
*/
if ((use_browser == 1 || annotate.use_stdio2) && annotate.has_br_stack)
sort__mode = SORT_MODE__BRANCH;
if (setup_sorting(NULL) < 0)
usage_with_options(annotate_usage, options);
ret = __cmd_annotate(&annotate);
out_delete:
/*
* Speed up the exit process by only deleting for debug builds. For
* large files this can save time.
*/
#ifndef NDEBUG
perf_session__delete(annotate.session);
#endif
annotation_options__exit(&annotate.opts);
return ret;
}
| linux-master | tools/perf/builtin-annotate.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* builtin-probe.c
*
* Builtin probe command: Set up probe events by C expression
*
* Written by Masami Hiramatsu <[email protected]>
*/
#include <sys/utsname.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include "builtin.h"
#include "namespaces.h"
#include "util/build-id.h"
#include "util/strlist.h"
#include "util/strfilter.h"
#include "util/symbol.h"
#include "util/symbol_conf.h"
#include "util/debug.h"
#include <subcmd/parse-options.h>
#include "util/probe-finder.h"
#include "util/probe-event.h"
#include "util/probe-file.h"
#include <linux/string.h>
#include <linux/zalloc.h>
#define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
#define DEFAULT_FUNC_FILTER "!_* & !*@plt"
#define DEFAULT_LIST_FILTER "*"
/* Session management structure */
static struct {
int command; /* Command short_name */
bool list_events;
bool uprobes;
bool target_used;
int nevents;
struct perf_probe_event events[MAX_PROBES];
struct line_range line_range;
char *target;
struct strfilter *filter;
struct nsinfo *nsi;
} *params;
/* Parse an event definition. Note that any error must die. */
static int parse_probe_event(const char *str)
{
struct perf_probe_event *pev = ¶ms->events[params->nevents];
int ret;
pr_debug("probe-definition(%d): %s\n", params->nevents, str);
if (++params->nevents == MAX_PROBES) {
pr_err("Too many probes (> %d) were specified.", MAX_PROBES);
return -1;
}
pev->uprobes = params->uprobes;
if (params->target) {
pev->target = strdup(params->target);
if (!pev->target)
return -ENOMEM;
params->target_used = true;
}
pev->nsi = nsinfo__get(params->nsi);
/* Parse a perf-probe command into event */
ret = parse_perf_probe_command(str, pev);
pr_debug("%d arguments\n", pev->nargs);
return ret;
}
static int params_add_filter(const char *str)
{
const char *err = NULL;
int ret = 0;
pr_debug2("Add filter: %s\n", str);
if (!params->filter) {
params->filter = strfilter__new(str, &err);
if (!params->filter)
ret = err ? -EINVAL : -ENOMEM;
} else
ret = strfilter__or(params->filter, str, &err);
if (ret == -EINVAL) {
pr_err("Filter parse error at %td.\n", err - str + 1);
pr_err("Source: \"%s\"\n", str);
pr_err(" %*c\n", (int)(err - str + 1), '^');
}
return ret;
}
static int set_target(const char *ptr)
{
int found = 0;
/*
* The first argument after options can be an absolute path
* to an executable / library or kernel module.
*
* TODO: Support relative path, and $PATH, $LD_LIBRARY_PATH,
* short module name.
*/
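/*
 * For example (hypothetical invocation), "perf probe /lib64/libc.so.6
 * malloc" sets the target to the libc path and treats the remaining
 * arguments as the probe definition.
 */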
if (!params->target && ptr && *ptr == '/') {
params->target = strdup(ptr);
if (!params->target)
return -ENOMEM;
params->target_used = false;
found = 1;
/* Anything not ending in ".ko" is assumed to be a user binary */
if (strlen(ptr) < 3 || strcmp(ptr + strlen(ptr) - 3, ".ko"))
params->uprobes = true;
}
return found;
}
static int parse_probe_event_argv(int argc, const char **argv)
{
int i, len, ret, found_target;
char *buf;
found_target = set_target(argv[0]);
if (found_target < 0)
return found_target;
if (found_target && argc == 1)
return 0;
/* Concatenate the remaining arguments */
len = 0;
for (i = 0; i < argc; i++) {
if (i == 0 && found_target)
continue;
len += strlen(argv[i]) + 1;
}
buf = zalloc(len + 1);
if (buf == NULL)
return -ENOMEM;
len = 0;
for (i = 0; i < argc; i++) {
if (i == 0 && found_target)
continue;
len += sprintf(&buf[len], "%s ", argv[i]);
}
ret = parse_probe_event(buf);
free(buf);
return ret;
}
static int opt_set_target(const struct option *opt, const char *str,
int unset __maybe_unused)
{
int ret = -ENOENT;
char *tmp;
if (str) {
if (!strcmp(opt->long_name, "exec"))
params->uprobes = true;
else if (!strcmp(opt->long_name, "module"))
params->uprobes = false;
else
return ret;
/* Expand the given path to an absolute path, except for module names */
if (params->uprobes || strchr(str, '/')) {
tmp = nsinfo__realpath(str, params->nsi);
if (!tmp) {
pr_warning("Failed to get the absolute path of %s: %m\n", str);
return ret;
}
} else {
tmp = strdup(str);
if (!tmp)
return -ENOMEM;
}
free(params->target);
params->target = tmp;
params->target_used = false;
ret = 0;
}
return ret;
}
static int opt_set_target_ns(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
{
int ret = -ENOENT;
pid_t ns_pid;
struct nsinfo *nsip;
if (str) {
errno = 0;
ns_pid = (pid_t)strtol(str, NULL, 10);
if (errno != 0) {
ret = -errno;
pr_warning("Failed to parse %s as a pid: %s\n", str,
strerror(errno));
return ret;
}
nsip = nsinfo__new(ns_pid);
if (nsip && nsinfo__need_setns(nsip))
params->nsi = nsinfo__get(nsip);
nsinfo__put(nsip);
ret = 0;
}
return ret;
}
/* Command option callbacks */
#ifdef HAVE_DWARF_SUPPORT
static int opt_show_lines(const struct option *opt,
const char *str, int unset __maybe_unused)
{
int ret = 0;
if (!str)
return 0;
if (params->command == 'L') {
pr_warning("Warning: more than one --line options are"
" detected. Only the first one is valid.\n");
return 0;
}
params->command = opt->short_name;
ret = parse_line_range_desc(str, ¶ms->line_range);
return ret;
}
static int opt_show_vars(const struct option *opt,
const char *str, int unset __maybe_unused)
{
struct perf_probe_event *pev = ¶ms->events[params->nevents];
int ret;
if (!str)
return 0;
ret = parse_probe_event(str);
if (!ret && pev->nargs != 0) {
pr_err(" Error: '--vars' doesn't accept arguments.\n");
return -EINVAL;
}
params->command = opt->short_name;
return ret;
}
#else
# define opt_show_lines NULL
# define opt_show_vars NULL
#endif
static int opt_add_probe_event(const struct option *opt,
const char *str, int unset __maybe_unused)
{
if (str) {
params->command = opt->short_name;
return parse_probe_event(str);
}
return 0;
}
static int opt_set_filter_with_command(const struct option *opt,
const char *str, int unset)
{
if (!unset)
params->command = opt->short_name;
if (str)
return params_add_filter(str);
return 0;
}
static int opt_set_filter(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
{
if (str)
return params_add_filter(str);
return 0;
}
static int init_params(void)
{
int ret;
params = calloc(1, sizeof(*params));
if (!params)
return -ENOMEM;
ret = line_range__init(¶ms->line_range);
if (ret)
zfree(¶ms);
return ret;
}
static void cleanup_params(void)
{
int i;
for (i = 0; i < params->nevents; i++)
clear_perf_probe_event(params->events + i);
line_range__clear(¶ms->line_range);
free(params->target);
strfilter__delete(params->filter);
nsinfo__put(params->nsi);
zfree(¶ms);
}
static void pr_err_with_code(const char *msg, int err)
{
char sbuf[STRERR_BUFSIZE];
pr_err("%s", msg);
pr_debug(" Reason: %s (Code: %d)",
str_error_r(-err, sbuf, sizeof(sbuf)), err);
pr_err("\n");
}
static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
{
int ret;
int i, k;
const char *event = NULL, *group = NULL;
ret = init_probe_symbol_maps(pevs->uprobes);
if (ret < 0)
return ret;
ret = convert_perf_probe_events(pevs, npevs);
if (ret < 0)
goto out_cleanup;
if (params->command == 'D') { /* just show the definition */
if (probe_conf.bootconfig)
ret = show_bootconfig_events(pevs, npevs);
else
ret = show_probe_trace_events(pevs, npevs);
goto out_cleanup;
}
ret = apply_perf_probe_events(pevs, npevs);
if (ret < 0)
goto out_cleanup;
for (i = k = 0; i < npevs; i++)
k += pevs[i].ntevs;
pr_info("Added new event%s\n", (k > 1) ? "s:" : ":");
for (i = 0; i < npevs; i++) {
struct perf_probe_event *pev = &pevs[i];
for (k = 0; k < pev->ntevs; k++) {
struct probe_trace_event *tev = &pev->tevs[k];
/* Skipped events have no event name */
if (!tev->event)
continue;
/* We use tev's name for showing new events */
show_perf_probe_event(tev->group, tev->event, pev,
tev->point.module, false);
/* Save the last valid name */
event = tev->event;
group = tev->group;
}
}
/* Note that it is possible to skip all events because of blacklist */
if (event) {
#ifndef HAVE_LIBTRACEEVENT
pr_info("\nperf is not linked with libtraceevent, to use the new probe you can use tracefs:\n\n");
pr_info("\tcd /sys/kernel/tracing/\n");
pr_info("\techo 1 > events/%s/%s/enable\n", group, event);
pr_info("\techo 1 > tracing_on\n");
pr_info("\tcat trace_pipe\n");
pr_info("\tBefore removing the probe, echo 0 > events/%s/%s/enable\n", group, event);
#else
/* Show how to use the event. */
pr_info("\nYou can now use it in all perf tools, such as:\n\n");
pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", group, event);
#endif
}
out_cleanup:
cleanup_perf_probe_events(pevs, npevs);
exit_probe_symbol_maps();
return ret;
}
static int del_perf_probe_caches(struct strfilter *filter)
{
struct probe_cache *cache;
struct strlist *bidlist;
struct str_node *nd;
int ret;
bidlist = build_id_cache__list_all(false);
if (!bidlist) {
ret = -errno;
pr_debug("Failed to get buildids: %d\n", ret);
return ret ?: -ENOMEM;
}
strlist__for_each_entry(nd, bidlist) {
cache = probe_cache__new(nd->s, NULL);
if (!cache)
continue;
if (probe_cache__filter_purge(cache, filter) < 0 ||
probe_cache__commit(cache) < 0)
pr_warning("Failed to remove entries for %s\n", nd->s);
probe_cache__delete(cache);
}
return 0;
}
static int perf_del_probe_events(struct strfilter *filter)
{
int ret, ret2, ufd = -1, kfd = -1;
char *str = strfilter__string(filter);
struct strlist *klist = NULL, *ulist = NULL;
struct str_node *ent;
if (!str)
return -EINVAL;
pr_debug("Delete filter: \'%s\'\n", str);
if (probe_conf.cache)
return del_perf_probe_caches(filter);
/* Get current event names */
ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);
if (ret < 0)
goto out;
klist = strlist__new(NULL, NULL);
ulist = strlist__new(NULL, NULL);
if (!klist || !ulist) {
ret = -ENOMEM;
goto out;
}
ret = probe_file__get_events(kfd, filter, klist);
if (ret == 0) {
strlist__for_each_entry(ent, klist)
pr_info("Removed event: %s\n", ent->s);
ret = probe_file__del_strlist(kfd, klist);
if (ret < 0)
goto error;
} else if (ret == -ENOMEM)
goto error;
ret2 = probe_file__get_events(ufd, filter, ulist);
if (ret2 == 0) {
strlist__for_each_entry(ent, ulist)
pr_info("Removed event: %s\n", ent->s);
ret2 = probe_file__del_strlist(ufd, ulist);
if (ret2 < 0)
goto error;
} else if (ret2 == -ENOMEM)
goto error;
if (ret == -ENOENT && ret2 == -ENOENT)
pr_warning("\"%s\" does not hit any event.\n", str);
else
ret = 0;
error:
if (kfd >= 0)
close(kfd);
if (ufd >= 0)
close(ufd);
out:
strlist__delete(klist);
strlist__delete(ulist);
free(str);
return ret;
}
#ifdef HAVE_DWARF_SUPPORT
#define PROBEDEF_STR \
"[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT [[NAME=]ARG ...]"
#else
#define PROBEDEF_STR "[EVENT=]FUNC[+OFF|%return] [[NAME=]ARG ...]"
#endif
static int
__cmd_probe(int argc, const char **argv)
{
const char * const probe_usage[] = {
"perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]",
"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
"perf probe [<options>] --del '[GROUP:]EVENT' ...",
"perf probe --list [GROUP:]EVENT ...",
#ifdef HAVE_DWARF_SUPPORT
"perf probe [<options>] --line 'LINEDESC'",
"perf probe [<options>] --vars 'PROBEPOINT'",
#endif
"perf probe [<options>] --funcs",
NULL
};
struct option options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show parsed arguments, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet,
"be quiet (do not show any warnings or messages)"),
OPT_CALLBACK_DEFAULT('l', "list", NULL, "[GROUP:]EVENT",
"list up probe events",
opt_set_filter_with_command, DEFAULT_LIST_FILTER),
OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
opt_set_filter_with_command),
OPT_CALLBACK('a', "add", NULL, PROBEDEF_STR,
"probe point definition, where\n"
"\t\tGROUP:\tGroup name (optional)\n"
"\t\tEVENT:\tEvent name\n"
"\t\tFUNC:\tFunction name\n"
"\t\tOFF:\tOffset from function entry (in byte)\n"
"\t\t%return:\tPut the probe at function return\n"
#ifdef HAVE_DWARF_SUPPORT
"\t\tSRC:\tSource code path\n"
"\t\tRL:\tRelative line number from function entry.\n"
"\t\tAL:\tAbsolute line number in file.\n"
"\t\tPT:\tLazy expression of line code.\n"
"\t\tARG:\tProbe argument (local variable name or\n"
"\t\t\tkprobe-tracer argument format.)\n",
#else
"\t\tARG:\tProbe argument (kprobe-tracer argument format.)\n",
#endif
opt_add_probe_event),
OPT_CALLBACK('D', "definition", NULL, PROBEDEF_STR,
"Show trace event definition of given traceevent for k/uprobe_events.",
opt_add_probe_event),
OPT_BOOLEAN('f', "force", &probe_conf.force_add, "forcibly add events"
" with existing name"),
OPT_CALLBACK('L', "line", NULL,
"FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]",
"Show source code lines.", opt_show_lines),
OPT_CALLBACK('V', "vars", NULL,
"FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT",
"Show accessible variables on PROBEDEF", opt_show_vars),
OPT_BOOLEAN('\0', "externs", &probe_conf.show_ext_vars,
"Show external variables too (with --vars only)"),
OPT_BOOLEAN('\0', "range", &probe_conf.show_location_range,
"Show variables location range in scope (with --vars only)"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING('s', "source", &symbol_conf.source_prefix,
"directory", "path to kernel source"),
OPT_BOOLEAN('\0', "no-inlines", &probe_conf.no_inlines,
"Don't search inlined functions"),
OPT__DRY_RUN(&probe_event_dry_run),
OPT_INTEGER('\0', "max-probes", &probe_conf.max_probes,
"Set how many probe points can be found for a probe."),
OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
"Show potential probe-able functions.",
opt_set_filter_with_command, DEFAULT_FUNC_FILTER),
OPT_CALLBACK('\0', "filter", NULL,
"[!]FILTER", "Set a filter (with --vars/funcs only)\n"
"\t\t\t(default: \"" DEFAULT_VAR_FILTER "\" for --vars,\n"
"\t\t\t \"" DEFAULT_FUNC_FILTER "\" for --funcs)",
opt_set_filter),
OPT_CALLBACK('x', "exec", NULL, "executable|path",
"target executable name or path", opt_set_target),
OPT_CALLBACK('m', "module", NULL, "modname|path",
"target module name (for online) or path (for offline)",
opt_set_target),
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
"Enable symbol demangling"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "cache", &probe_conf.cache, "Manipulate probe cache"),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
OPT_CALLBACK(0, "target-ns", NULL, "pid",
"target pid for namespace contexts", opt_set_target_ns),
OPT_BOOLEAN(0, "bootconfig", &probe_conf.bootconfig,
"Output probe definition with bootconfig format"),
OPT_END()
};
int ret;
set_option_flag(options, 'a', "add", PARSE_OPT_EXCLUSIVE);
set_option_flag(options, 'd', "del", PARSE_OPT_EXCLUSIVE);
set_option_flag(options, 'D', "definition", PARSE_OPT_EXCLUSIVE);
set_option_flag(options, 'l', "list", PARSE_OPT_EXCLUSIVE);
#ifdef HAVE_DWARF_SUPPORT
set_option_flag(options, 'L', "line", PARSE_OPT_EXCLUSIVE);
set_option_flag(options, 'V', "vars", PARSE_OPT_EXCLUSIVE);
#else
# define set_nobuild(s, l, c) set_option_nobuild(options, s, l, "NO_DWARF=1", c)
set_nobuild('L', "line", false);
set_nobuild('V', "vars", false);
set_nobuild('\0', "externs", false);
set_nobuild('\0', "range", false);
set_nobuild('k', "vmlinux", true);
set_nobuild('s', "source", true);
set_nobuild('\0', "no-inlines", true);
# undef set_nobuild
#endif
set_option_flag(options, 'F', "funcs", PARSE_OPT_EXCLUSIVE);
argc = parse_options(argc, argv, options, probe_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (quiet) {
if (verbose != 0) {
pr_err(" Error: -v and -q are exclusive.\n");
return -EINVAL;
}
verbose = -1;
}
if (argc > 0) {
if (strcmp(argv[0], "-") == 0) {
usage_with_options_msg(probe_usage, options,
"'-' is not supported.\n");
}
if (params->command && params->command != 'a') {
usage_with_options_msg(probe_usage, options,
"another command except --add is set.\n");
}
ret = parse_probe_event_argv(argc, argv);
if (ret < 0) {
pr_err_with_code(" Error: Command Parse Error.", ret);
return ret;
}
params->command = 'a';
}
ret = symbol__validate_sym_arguments();
if (ret)
return ret;
if (probe_conf.max_probes == 0)
probe_conf.max_probes = MAX_PROBES;
/*
* Only consider the user's kernel image path if given.
*/
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
/*
* Except for --list, --del and --add, the other commands neither
* depend on nor change the running kernel. So if the user gives an
* offline vmlinux, ignore its buildid.
*/
if (!strchr("lda", params->command) && symbol_conf.vmlinux_name)
symbol_conf.ignore_vmlinux_buildid = true;
switch (params->command) {
case 'l':
if (params->uprobes) {
pr_err(" Error: Don't use --list with --exec.\n");
parse_options_usage(probe_usage, options, "l", true);
parse_options_usage(NULL, options, "x", true);
return -EINVAL;
}
ret = show_perf_probe_events(params->filter);
if (ret < 0)
pr_err_with_code(" Error: Failed to show event list.", ret);
return ret;
case 'F':
ret = show_available_funcs(params->target, params->nsi,
params->filter, params->uprobes);
if (ret < 0)
pr_err_with_code(" Error: Failed to show functions.", ret);
return ret;
#ifdef HAVE_DWARF_SUPPORT
case 'L':
ret = show_line_range(¶ms->line_range, params->target,
params->nsi, params->uprobes);
if (ret < 0)
pr_err_with_code(" Error: Failed to show lines.", ret);
return ret;
case 'V':
if (!params->filter)
params->filter = strfilter__new(DEFAULT_VAR_FILTER,
NULL);
ret = show_available_vars(params->events, params->nevents,
params->filter);
if (ret < 0)
pr_err_with_code(" Error: Failed to show vars.", ret);
return ret;
#endif
case 'd':
ret = perf_del_probe_events(params->filter);
if (ret < 0) {
pr_err_with_code(" Error: Failed to delete events.", ret);
return ret;
}
break;
case 'D':
if (probe_conf.bootconfig && params->uprobes) {
pr_err(" Error: --bootconfig doesn't support uprobes.\n");
return -EINVAL;
}
fallthrough;
case 'a':
/* Ensure the last given target is used */
if (params->target && !params->target_used) {
pr_err(" Error: -x/-m must follow the probe definitions.\n");
parse_options_usage(probe_usage, options, "m", true);
parse_options_usage(NULL, options, "x", true);
return -EINVAL;
}
ret = perf_add_probe_events(params->events, params->nevents);
if (ret < 0) {
/*
* When perf_add_probe_events() fails it calls
* cleanup_perf_probe_events(pevs, npevs), i.e.
* cleanup_perf_probe_events(params->events, params->nevents), which
* will call clear_perf_probe_event(), so set nevents to zero
* to avoid cleanup_params() calling clear_perf_probe_event() again
* on the same pevs.
*/
params->nevents = 0;
pr_err_with_code(" Error: Failed to add events.", ret);
return ret;
}
break;
default:
usage_with_options(probe_usage, options);
}
return 0;
}
int cmd_probe(int argc, const char **argv)
{
int ret;
ret = init_params();
if (!ret) {
ret = __cmd_probe(argc, argv);
cleanup_params();
}
return ret < 0 ? ret : 0;
}
| linux-master | tools/perf/builtin-probe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-inject.c
*
* Builtin inject command: Examine the live mode (stdin) event stream
* and repipe it to stdout while optionally injecting additional
* events into it.
*/
#include "builtin.h"
#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/string2.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"
#include "util/util.h"
#include "util/tsc.h"
#include <internal/lib.h>
#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <linux/list.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/hash.h>
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <inttypes.h>
struct guest_event {
struct perf_sample sample;
union perf_event *event;
char *event_buf;
};
struct guest_id {
/* hlist_node must be first, see free_hlist() */
struct hlist_node node;
u64 id;
u64 host_id;
u32 vcpu;
};
struct guest_tid {
/* hlist_node must be first, see free_hlist() */
struct hlist_node node;
/* Thread ID of QEMU thread */
u32 tid;
u32 vcpu;
};
struct guest_vcpu {
/* Current host CPU */
u32 cpu;
/* Thread ID of QEMU thread */
u32 tid;
};
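/*
 * State used to merge events from a guest perf.data file into the host
 * session (see the --guest-data option).
 */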
struct guest_session {
char *perf_data_file;
u32 machine_pid;
u64 time_offset;
double time_scale;
struct perf_tool tool;
struct perf_data data;
struct perf_session *session;
char *tmp_file_name;
int tmp_fd;
struct perf_tsc_conversion host_tc;
struct perf_tsc_conversion guest_tc;
bool copy_kcore_dir;
bool have_tc;
bool fetched;
bool ready;
u16 dflt_id_hdr_size;
u64 dflt_id;
u64 highest_id;
/* Array of guest_vcpu */
struct guest_vcpu *vcpu;
size_t vcpu_cnt;
/* Hash table for guest_id */
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
/* Hash table for guest_tid */
struct hlist_head tids[PERF_EVLIST__HLIST_SIZE];
/* Place to stash next guest event */
struct guest_event ev;
};
struct perf_inject {
struct perf_tool tool;
struct perf_session *session;
bool build_ids;
bool build_id_all;
bool sched_stat;
bool have_auxtrace;
bool strip;
bool jit_mode;
bool in_place_update;
bool in_place_update_dry_run;
bool is_pipe;
bool copy_kcore_dir;
const char *input_name;
struct perf_data output;
u64 bytes_written;
u64 aux_id;
struct list_head samples;
struct itrace_synth_opts itrace_synth_opts;
char *event_copy;
struct perf_file_section secs[HEADER_FEAT_BITS];
struct guest_session guest_session;
struct strlist *known_build_ids;
};
struct event_entry {
struct list_head node;
u32 tid;
union perf_event event[];
};
static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
struct machine *machine, u8 cpumode, u32 flags);
static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
ssize_t size;
size = perf_data__write(&inject->output, buf, sz);
if (size < 0)
return -errno;
inject->bytes_written += size;
return 0;
}
static int perf_event__repipe_synth(struct perf_tool *tool,
union perf_event *event)
{
struct perf_inject *inject = container_of(tool, struct perf_inject,
tool);
return output_bytes(inject, event, event->header.size);
}
static int perf_event__repipe_oe_synth(struct perf_tool *tool,
union perf_event *event,
struct ordered_events *oe __maybe_unused)
{
return perf_event__repipe_synth(tool, event);
}
#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct ordered_events *oe __maybe_unused)
{
return 0;
}
#endif
static int perf_event__repipe_op2_synth(struct perf_session *session,
union perf_event *event)
{
return perf_event__repipe_synth(session->tool, event);
}
static int perf_event__repipe_op4_synth(struct perf_session *session,
union perf_event *event,
u64 data __maybe_unused,
const char *str __maybe_unused)
{
return perf_event__repipe_synth(session->tool, event);
}
static int perf_event__repipe_attr(struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist)
{
struct perf_inject *inject = container_of(tool, struct perf_inject,
tool);
int ret;
ret = perf_event__process_attr(tool, event, pevlist);
if (ret)
return ret;
if (!inject->is_pipe)
return 0;
return perf_event__repipe_synth(tool, event);
}
static int perf_event__repipe_event_update(struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist __maybe_unused)
{
return perf_event__repipe_synth(tool, event);
}
#ifdef HAVE_AUXTRACE_SUPPORT
static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t size)
{
char buf[4096];
ssize_t ssz;
int ret;
while (size > 0) {
ssz = perf_data__read(data, buf, min(size, (off_t)sizeof(buf)));
if (ssz < 0)
return -errno;
ret = output_bytes(inject, buf, ssz);
if (ret)
return ret;
size -= ssz;
}
return 0;
}
static s64 perf_event__repipe_auxtrace(struct perf_session *session,
union perf_event *event)
{
struct perf_tool *tool = session->tool;
struct perf_inject *inject = container_of(tool, struct perf_inject,
tool);
int ret;
inject->have_auxtrace = true;
if (!inject->output.is_pipe) {
off_t offset;
offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
if (offset == -1)
return -errno;
ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
event, offset);
if (ret < 0)
return ret;
}
if (perf_data__is_pipe(session->data) || !session->one_mmap) {
ret = output_bytes(inject, event, event->header.size);
if (ret < 0)
return ret;
ret = copy_bytes(inject, session->data,
event->auxtrace.size);
} else {
ret = output_bytes(inject, event,
event->header.size + event->auxtrace.size);
}
if (ret < 0)
return ret;
return event->auxtrace.size;
}
#else
static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
pr_err("AUX area tracing not supported\n");
return -EINVAL;
}
#endif
static int perf_event__repipe(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
return perf_event__repipe_synth(tool, event);
}
static int perf_event__drop(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
return 0;
}
static int perf_event__drop_aux(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
if (!inject->aux_id)
inject->aux_id = sample->id;
return 0;
}
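/*
 * Strip the AUX area data out of a sample event: copy the sz1 bytes up to
 * the aux data and the sz2 trailing bytes after it into a scratch buffer,
 * then zero the size field that preceded the data.
 */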
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
union perf_event *event,
struct perf_sample *sample)
{
size_t sz1 = sample->aux_sample.data - (void *)event;
size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
union perf_event *ev;
if (inject->event_copy == NULL) {
inject->event_copy = malloc(PERF_SAMPLE_MAX_SIZE);
if (!inject->event_copy)
return ERR_PTR(-ENOMEM);
}
ev = (union perf_event *)inject->event_copy;
if (sz1 > event->header.size || sz2 > event->header.size ||
sz1 + sz2 > event->header.size ||
sz1 < sizeof(struct perf_event_header) + sizeof(u64))
return event;
memcpy(ev, event, sz1);
memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
ev->header.size = sz1 + sz2;
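/* Zero the u64 aux_sample size field that immediately precedes the cut data */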
((u64 *)((void *)ev + sz1))[-1] = 0;
return ev;
}
typedef int (*inject_handler)(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine);
static int perf_event__repipe_sample(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct perf_inject *inject = container_of(tool, struct perf_inject,
tool);
if (evsel && evsel->handler) {
inject_handler f = evsel->handler;
return f(tool, event, sample, evsel, machine);
}
build_id__mark_dso_hit(tool, event, sample, evsel, machine);
if (inject->itrace_synth_opts.set && sample->aux_sample.size) {
event = perf_inject__cut_auxtrace_sample(inject, event, sample);
if (IS_ERR(event))
return PTR_ERR(event);
}
return perf_event__repipe_synth(tool, event);
}
static int perf_event__repipe_mmap(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int err;
err = perf_event__process_mmap(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
return err;
}
#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
u64 n = 0;
int ret;
/*
* If this is a JIT marker, inject the JIT mmaps and generate ELF images.
*/
ret = jit_process(inject->session, &inject->output, machine,
event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
if (ret < 0)
return ret;
if (ret) {
inject->bytes_written += n;
return 0;
}
return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif
static struct dso *findnew_dso(int pid, int tid, const char *filename,
struct dso_id *id, struct machine *machine)
{
struct thread *thread;
struct nsinfo *nsi = NULL;
struct nsinfo *nnsi;
struct dso *dso;
bool vdso;
thread = machine__findnew_thread(machine, pid, tid);
if (thread == NULL) {
pr_err("cannot find or create a task %d/%d.\n", tid, pid);
return NULL;
}
vdso = is_vdso_map(filename);
nsi = nsinfo__get(thread__nsinfo(thread));
if (vdso) {
/* The vdso maps are always on the host and not the
* container. Ensure that we don't use setns to look
* them up.
*/
nnsi = nsinfo__copy(nsi);
if (nnsi) {
nsinfo__put(nsi);
nsinfo__clear_need_setns(nnsi);
nsi = nnsi;
}
dso = machine__findnew_vdso(machine, thread);
} else {
dso = machine__findnew_dso_id(machine, filename, id);
}
if (dso) {
mutex_lock(&dso->lock);
nsinfo__put(dso->nsinfo);
dso->nsinfo = nsi;
mutex_unlock(&dso->lock);
} else
nsinfo__put(nsi);
thread__put(thread);
return dso;
}
static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct dso *dso;
dso = findnew_dso(event->mmap.pid, event->mmap.tid,
event->mmap.filename, NULL, machine);
if (dso && !dso->hit) {
dso->hit = 1;
dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
}
dso__put(dso);
return perf_event__repipe(tool, event, sample, machine);
}
static int perf_event__repipe_mmap2(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int err;
err = perf_event__process_mmap2(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
struct dso *dso;
dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
event->mmap2.filename, NULL, machine);
if (dso) {
/* mark it so that we do not inject a build-id for it */
dso->hit = 1;
}
dso__put(dso);
}
return err;
}
#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
u64 n = 0;
int ret;
/*
* If this is a JIT marker, inject the JIT mmaps and generate ELF images.
*/
ret = jit_process(inject->session, &inject->output, machine,
event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
if (ret < 0)
return ret;
if (ret) {
inject->bytes_written += n;
return 0;
}
return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif
static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct dso_id dso_id = {
.maj = event->mmap2.maj,
.min = event->mmap2.min,
.ino = event->mmap2.ino,
.ino_generation = event->mmap2.ino_generation,
};
struct dso *dso;
if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
/* cannot use dso_id since it'd have invalid info */
dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
event->mmap2.filename, NULL, machine);
if (dso) {
/* mark it so that we do not inject a build-id for it */
dso->hit = 1;
}
dso__put(dso);
perf_event__repipe(tool, event, sample, machine);
return 0;
}
dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
event->mmap2.filename, &dso_id, machine);
if (dso && !dso->hit) {
dso->hit = 1;
dso__inject_build_id(dso, tool, machine, sample->cpumode,
event->mmap2.flags);
}
dso__put(dso);
perf_event__repipe(tool, event, sample, machine);
return 0;
}
static int perf_event__repipe_fork(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int err;
err = perf_event__process_fork(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
return err;
}
static int perf_event__repipe_comm(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int err;
err = perf_event__process_comm(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
return err;
}
static int perf_event__repipe_namespaces(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int err = perf_event__process_namespaces(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
return err;
}
static int perf_event__repipe_exit(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int err;
err = perf_event__process_exit(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
return err;
}
#ifdef HAVE_LIBTRACEEVENT
static int perf_event__repipe_tracing_data(struct perf_session *session,
union perf_event *event)
{
perf_event__repipe_synth(session->tool, event);
return perf_event__process_tracing_data(session, event);
}
#endif
static int dso__read_build_id(struct dso *dso)
{
struct nscookie nsc;
if (dso->has_build_id)
return 0;
mutex_lock(&dso->lock);
nsinfo__mountns_enter(dso->nsinfo, &nsc);
if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
dso->has_build_id = true;
else if (dso->nsinfo) {
char *new_name = dso__filename_with_chroot(dso, dso->long_name);
if (new_name && filename__read_build_id(new_name, &dso->bid) > 0)
dso->has_build_id = true;
free(new_name);
}
nsinfo__mountns_exit(&nsc);
mutex_unlock(&dso->lock);
return dso->has_build_id ? 0 : -1;
}
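/*
 * --known-build-ids entries have the form "<hex build-id> <dso name>",
 * e.g. "abcdef0123456789 /usr/lib/libc.so.6" (illustrative values);
 * malformed entries are dropped while parsing below.
 */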
static struct strlist *perf_inject__parse_known_build_ids(
const char *known_build_ids_string)
{
struct str_node *pos, *tmp;
struct strlist *known_build_ids;
int bid_len;
known_build_ids = strlist__new(known_build_ids_string, NULL);
if (known_build_ids == NULL)
return NULL;
strlist__for_each_entry_safe(pos, tmp, known_build_ids) {
const char *build_id, *dso_name;
build_id = skip_spaces(pos->s);
dso_name = strchr(build_id, ' ');
if (dso_name == NULL) {
strlist__remove(known_build_ids, pos);
continue;
}
bid_len = dso_name - build_id;
dso_name = skip_spaces(dso_name);
if (bid_len % 2 != 0 || bid_len >= SBUILD_ID_SIZE) {
strlist__remove(known_build_ids, pos);
continue;
}
for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
if (!isxdigit(build_id[2 * ix]) ||
!isxdigit(build_id[2 * ix + 1])) {
strlist__remove(known_build_ids, pos);
break;
}
}
}
return known_build_ids;
}
static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
struct dso *dso)
{
struct str_node *pos;
int bid_len;
strlist__for_each_entry(pos, inject->known_build_ids) {
const char *build_id, *dso_name;
build_id = skip_spaces(pos->s);
dso_name = strchr(build_id, ' ');
bid_len = dso_name - build_id;
dso_name = skip_spaces(dso_name);
if (strcmp(dso->long_name, dso_name))
continue;
for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
dso->bid.data[ix] = (hex(build_id[2 * ix]) << 4 |
hex(build_id[2 * ix + 1]));
}
dso->bid.size = bid_len / 2;
dso->has_build_id = 1;
return true;
}
return false;
}
static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
struct machine *machine, u8 cpumode, u32 flags)
{
struct perf_inject *inject = container_of(tool, struct perf_inject,
tool);
int err;
if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
return 0;
if (is_no_dso_memory(dso->long_name))
return 0;
if (inject->known_build_ids != NULL &&
perf_inject__lookup_known_build_id(inject, dso))
return 1;
if (dso__read_build_id(dso) < 0) {
pr_debug("no build_id found for %s\n", dso->long_name);
return -1;
}
err = perf_event__synthesize_build_id(tool, dso, cpumode,
perf_event__repipe, machine);
if (err) {
pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
return -1;
}
return 0;
}
int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel __maybe_unused,
struct machine *machine)
{
struct addr_location al;
struct thread *thread;
addr_location__init(&al);
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
goto repipe;
}
if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
struct dso *dso = map__dso(al.map);
if (!dso->hit) {
dso->hit = 1;
dso__inject_build_id(dso, tool, machine,
sample->cpumode, map__flags(al.map));
}
}
thread__put(thread);
repipe:
perf_event__repipe(tool, event, sample, machine);
addr_location__exit(&al);
return 0;
}
static int perf_inject__sched_process_exit(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
struct event_entry *ent;
list_for_each_entry(ent, &inject->samples, node) {
if (sample->tid == ent->tid) {
list_del_init(&ent->node);
free(ent);
break;
}
}
return 0;
}
static int perf_inject__sched_switch(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
struct event_entry *ent;
perf_inject__sched_process_exit(tool, event, sample, evsel, machine);
ent = malloc(event->header.size + sizeof(struct event_entry));
if (ent == NULL) {
color_fprintf(stderr, PERF_COLOR_RED,
"Not enough memory to process sched switch event!");
return -1;
}
ent->tid = sample->tid;
memcpy(&ent->event, event, event->header.size);
list_add(&ent->node, &inject->samples);
return 0;
}
#ifdef HAVE_LIBTRACEEVENT
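/*
 * Replace a sched_stat_* sample with the stashed sched_switch event for
 * the same thread, keeping the stat sample's period and timestamp, so the
 * emitted sample carries the context of the switch.
 */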
static int perf_inject__sched_stat(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct event_entry *ent;
union perf_event *event_sw;
struct perf_sample sample_sw;
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
u32 pid = evsel__intval(evsel, sample, "pid");
list_for_each_entry(ent, &inject->samples, node) {
if (pid == ent->tid)
goto found;
}
return 0;
found:
event_sw = &ent->event[0];
evsel__parse_sample(evsel, event_sw, &sample_sw);
sample_sw.period = sample->period;
sample_sw.time = sample->time;
perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
evsel->core.attr.read_format, &sample_sw);
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
#endif
static struct guest_vcpu *guest_session__vcpu(struct guest_session *gs, u32 vcpu)
{
if (realloc_array_as_needed(gs->vcpu, gs->vcpu_cnt, vcpu, NULL))
return NULL;
return &gs->vcpu[vcpu];
}
static int guest_session__output_bytes(struct guest_session *gs, void *buf, size_t sz)
{
ssize_t ret = writen(gs->tmp_fd, buf, sz);
return ret < 0 ? ret : 0;
}
static int guest_session__repipe(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct guest_session *gs = container_of(tool, struct guest_session, tool);
return guest_session__output_bytes(gs, event, event->header.size);
}
static int guest_session__map_tid(struct guest_session *gs, u32 tid, u32 vcpu)
{
struct guest_tid *guest_tid = zalloc(sizeof(*guest_tid));
int hash;
if (!guest_tid)
return -ENOMEM;
guest_tid->tid = tid;
guest_tid->vcpu = vcpu;
hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
hlist_add_head(&guest_tid->node, &gs->tids[hash]);
return 0;
}
static int host_peek_vm_comms_cb(struct perf_session *session __maybe_unused,
union perf_event *event,
u64 offset __maybe_unused, void *data)
{
struct guest_session *gs = data;
unsigned int vcpu;
struct guest_vcpu *guest_vcpu;
int ret;
if (event->header.type != PERF_RECORD_COMM ||
event->comm.pid != gs->machine_pid)
return 0;
/*
* The QEMU option -name debug-threads=on causes thread names to be
* formatted as below, although this is not an ABI. libvirt also seems
* to use this by default. Here we rely on it to tell us which thread
* is which VCPU.
*/
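/* e.g. a QEMU thread named "CPU 0/KVM" is the thread for VCPU 0 */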
ret = sscanf(event->comm.comm, "CPU %u/KVM", &vcpu);
if (ret <= 0)
return ret;
pr_debug("Found VCPU: tid %u comm %s vcpu %u\n",
event->comm.tid, event->comm.comm, vcpu);
if (vcpu > INT_MAX) {
pr_err("Invalid VCPU %u\n", vcpu);
return -EINVAL;
}
guest_vcpu = guest_session__vcpu(gs, vcpu);
if (!guest_vcpu)
return -ENOMEM;
if (guest_vcpu->tid && guest_vcpu->tid != event->comm.tid) {
pr_err("Fatal error: Two threads found with the same VCPU\n");
return -EINVAL;
}
guest_vcpu->tid = event->comm.tid;
return guest_session__map_tid(gs, event->comm.tid, vcpu);
}
static int host_peek_vm_comms(struct perf_session *session, struct guest_session *gs)
{
return perf_session__peek_events(session, session->header.data_offset,
session->header.data_size,
host_peek_vm_comms_cb, gs);
}
static bool evlist__is_id_used(struct evlist *evlist, u64 id)
{
return evlist__id2sid(evlist, id);
}
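/* Pick the next id above the current maximum that the host evlist does not use */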
static u64 guest_session__allocate_new_id(struct guest_session *gs, struct evlist *host_evlist)
{
do {
gs->highest_id += 1;
} while (!gs->highest_id || evlist__is_id_used(host_evlist, gs->highest_id));
return gs->highest_id;
}
static int guest_session__map_id(struct guest_session *gs, u64 id, u64 host_id, u32 vcpu)
{
struct guest_id *guest_id = zalloc(sizeof(*guest_id));
int hash;
if (!guest_id)
return -ENOMEM;
guest_id->id = id;
guest_id->host_id = host_id;
guest_id->vcpu = vcpu;
hash = hash_64(guest_id->id, PERF_EVLIST__HLIST_BITS);
hlist_add_head(&guest_id->node, &gs->heads[hash]);
return 0;
}
static u64 evlist__find_highest_id(struct evlist *evlist)
{
struct evsel *evsel;
u64 highest_id = 1;
evlist__for_each_entry(evlist, evsel) {
u32 j;
for (j = 0; j < evsel->core.ids; j++) {
u64 id = evsel->core.id[j];
if (id > highest_id)
highest_id = id;
}
}
return highest_id;
}
static int guest_session__map_ids(struct guest_session *gs, struct evlist *host_evlist)
{
struct evlist *evlist = gs->session->evlist;
struct evsel *evsel;
int ret;
evlist__for_each_entry(evlist, evsel) {
u32 j;
for (j = 0; j < evsel->core.ids; j++) {
struct perf_sample_id *sid;
u64 host_id;
u64 id;
id = evsel->core.id[j];
sid = evlist__id2sid(evlist, id);
if (!sid || sid->cpu.cpu == -1)
continue;
host_id = guest_session__allocate_new_id(gs, host_evlist);
ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
if (ret)
return ret;
}
}
return 0;
}
static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 id)
{
struct hlist_head *head;
struct guest_id *guest_id;
int hash;
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
head = &gs->heads[hash];
hlist_for_each_entry(guest_id, head, node)
if (guest_id->id == id)
return guest_id;
return NULL;
}
static int process_attr(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
return perf_event__process_attr(tool, event, &inject->session->evlist);
}
static int guest_session__add_attr(struct guest_session *gs, struct evsel *evsel)
{
struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
struct perf_event_attr attr = evsel->core.attr;
u64 *id_array;
u32 *vcpu_array;
int ret = -ENOMEM;
u32 i;
id_array = calloc(evsel->core.ids, sizeof(*id_array));
if (!id_array)
return -ENOMEM;
vcpu_array = calloc(evsel->core.ids, sizeof(*vcpu_array));
if (!vcpu_array)
goto out;
for (i = 0; i < evsel->core.ids; i++) {
u64 id = evsel->core.id[i];
struct guest_id *guest_id = guest_session__lookup_id(gs, id);
if (!guest_id) {
pr_err("Failed to find guest id %"PRIu64"\n", id);
ret = -EINVAL;
goto out;
}
id_array[i] = guest_id->host_id;
vcpu_array[i] = guest_id->vcpu;
}
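/* Ensure ids can be parsed from samples, and mark the events guest-only */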
attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
attr.exclude_host = 1;
attr.exclude_guest = 0;
ret = perf_event__synthesize_attr(&inject->tool, &attr, evsel->core.ids,
id_array, process_attr);
if (ret)
pr_err("Failed to add guest attr.\n");
for (i = 0; i < evsel->core.ids; i++) {
struct perf_sample_id *sid;
u32 vcpu = vcpu_array[i];
sid = evlist__id2sid(inject->session->evlist, id_array[i]);
/* Guest event is per-thread from the host point of view */
sid->cpu.cpu = -1;
sid->tid = gs->vcpu[vcpu].tid;
sid->machine_pid = gs->machine_pid;
sid->vcpu.cpu = vcpu;
}
out:
free(vcpu_array);
free(id_array);
return ret;
}
static int guest_session__add_attrs(struct guest_session *gs)
{
struct evlist *evlist = gs->session->evlist;
struct evsel *evsel;
int ret;
evlist__for_each_entry(evlist, evsel) {
ret = guest_session__add_attr(gs, evsel);
if (ret)
return ret;
}
return 0;
}
static int synthesize_id_index(struct perf_inject *inject, size_t new_cnt)
{
struct perf_session *session = inject->session;
struct evlist *evlist = session->evlist;
struct machine *machine = &session->machines.host;
size_t from = evlist->core.nr_entries - new_cnt;
return __perf_event__synthesize_id_index(&inject->tool, perf_event__repipe,
evlist, machine, from);
}
static struct guest_tid *guest_session__lookup_tid(struct guest_session *gs, u32 tid)
{
struct hlist_head *head;
struct guest_tid *guest_tid;
int hash;
hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
head = &gs->tids[hash];
hlist_for_each_entry(guest_tid, head, node)
if (guest_tid->tid == tid)
return guest_tid;
return NULL;
}
static bool dso__is_in_kernel_space(struct dso *dso)
{
if (dso__is_vdso(dso))
return false;
return dso__is_kcore(dso) ||
dso->kernel ||
is_kernel_module(dso->long_name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
}
static u64 evlist__first_id(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.ids)
return evsel->core.id[0];
}
return 0;
}
static int process_build_id(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
return perf_event__process_build_id(inject->session, event);
}
static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
{
struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
u8 cpumode = dso__is_in_kernel_space(dso) ?
PERF_RECORD_MISC_GUEST_KERNEL :
PERF_RECORD_MISC_GUEST_USER;
if (!machine)
return -ENOMEM;
dso->hit = 1;
return perf_event__synthesize_build_id(&inject->tool, dso, cpumode,
process_build_id, machine);
}
static int guest_session__add_build_ids(struct guest_session *gs)
{
struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
struct machine *machine = &gs->session->machines.host;
struct dso *dso;
int ret;
/* Build IDs will be put in the Build ID feature section */
perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);
dsos__for_each_with_build_id(dso, &machine->dsos.head) {
ret = synthesize_build_id(inject, dso, gs->machine_pid);
if (ret)
return ret;
}
return 0;
}
static int guest_session__ksymbol_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct guest_session *gs = container_of(tool, struct guest_session, tool);
/* Only support out-of-line i.e. no BPF support */
if (event->ksymbol.ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
return 0;
return guest_session__output_bytes(gs, event, event->header.size);
}
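/*
 * Open and process the guest perf.data file. Only the event types listed
 * below are repiped; since perf cannot process two sessions simultaneously,
 * the guest events go to a temporary file first, to be read back later and
 * injected in host timestamp order.
 */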
static int guest_session__start(struct guest_session *gs, const char *name, bool force)
{
char tmp_file_name[] = "/tmp/perf-inject-guest_session-XXXXXX";
struct perf_session *session;
int ret;
/* Only these events will be injected */
gs->tool.mmap = guest_session__repipe;
gs->tool.mmap2 = guest_session__repipe;
gs->tool.comm = guest_session__repipe;
gs->tool.fork = guest_session__repipe;
gs->tool.exit = guest_session__repipe;
gs->tool.lost = guest_session__repipe;
gs->tool.context_switch = guest_session__repipe;
gs->tool.ksymbol = guest_session__ksymbol_event;
gs->tool.text_poke = guest_session__repipe;
/*
* Processing a build ID creates a struct dso with that build ID. Later,
* all guest dsos are iterated and the build IDs processed into the host
* session where they will be output to the Build ID feature section
* when the perf.data file header is written.
*/
gs->tool.build_id = perf_event__process_build_id;
/* Process the id index to know what VCPU an ID belongs to */
gs->tool.id_index = perf_event__process_id_index;
gs->tool.ordered_events = true;
gs->tool.ordering_requires_timestamps = true;
gs->data.path = name;
gs->data.force = force;
gs->data.mode = PERF_DATA_MODE_READ;
session = perf_session__new(&gs->data, &gs->tool);
if (IS_ERR(session))
return PTR_ERR(session);
gs->session = session;
/*
* Initial events have zero'd ID samples. Get default ID sample size
* used for removing them.
*/
gs->dflt_id_hdr_size = session->machines.host.id_hdr_size;
/* And default ID for adding back a host-compatible ID sample */
gs->dflt_id = evlist__first_id(session->evlist);
if (!gs->dflt_id) {
pr_err("Guest data has no sample IDs");
return -EINVAL;
}
/* Temporary file for guest events */
gs->tmp_file_name = strdup(tmp_file_name);
if (!gs->tmp_file_name)
return -ENOMEM;
gs->tmp_fd = mkstemp(gs->tmp_file_name);
if (gs->tmp_fd < 0)
return -errno;
if (zstd_init(&gs->session->zstd_data, 0) < 0)
pr_warning("Guest session decompression initialization failed.\n");
/*
* perf does not support processing 2 sessions simultaneously, so output
* guest events to a temporary file.
*/
ret = perf_session__process_events(gs->session);
if (ret)
return ret;
if (lseek(gs->tmp_fd, 0, SEEK_SET))
return -errno;
return 0;
}
/* Free hlist nodes assuming hlist_node is the first member of hlist entries */
static void free_hlist(struct hlist_head *heads, size_t hlist_sz)
{
struct hlist_node *pos, *n;
size_t i;
for (i = 0; i < hlist_sz; ++i) {
hlist_for_each_safe(pos, n, &heads[i]) {
hlist_del(pos);
free(pos);
}
}
}
static void guest_session__exit(struct guest_session *gs)
{
if (gs->session) {
perf_session__delete(gs->session);
free_hlist(gs->heads, PERF_EVLIST__HLIST_SIZE);
free_hlist(gs->tids, PERF_EVLIST__HLIST_SIZE);
}
if (gs->tmp_file_name) {
if (gs->tmp_fd >= 0)
close(gs->tmp_fd);
unlink(gs->tmp_file_name);
zfree(&gs->tmp_file_name);
}
zfree(&gs->vcpu);
zfree(&gs->perf_data_file);
}
static void get_tsc_conv(struct perf_tsc_conversion *tc, struct perf_record_time_conv *time_conv)
{
tc->time_shift = time_conv->time_shift;
tc->time_mult = time_conv->time_mult;
tc->time_zero = time_conv->time_zero;
tc->time_cycles = time_conv->time_cycles;
tc->time_mask = time_conv->time_mask;
tc->cap_user_time_zero = time_conv->cap_user_time_zero;
tc->cap_user_time_short = time_conv->cap_user_time_short;
}
static void guest_session__get_tc(struct guest_session *gs)
{
struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
get_tsc_conv(&gs->host_tc, &inject->session->time_conv);
get_tsc_conv(&gs->guest_tc, &gs->session->time_conv);
}
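/*
 * Convert a guest perf time to a host perf time:
 *   host_time = tsc_to_perf_time((perf_time_to_tsc(guest_time) - offset) / scale)
 * falling back to the raw value on either side when TSC conversion
 * information (cap_user_time_zero) is not available.
 */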
static void guest_session__convert_time(struct guest_session *gs, u64 guest_time, u64 *host_time)
{
u64 tsc;
if (!guest_time) {
*host_time = 0;
return;
}
if (gs->guest_tc.cap_user_time_zero)
tsc = perf_time_to_tsc(guest_time, &gs->guest_tc);
else
tsc = guest_time;
/*
* This is the correct order of operations for x86 if the TSC Offset and
* Multiplier values are used.
*/
tsc -= gs->time_offset;
tsc /= gs->time_scale;
if (gs->host_tc.cap_user_time_zero)
*host_time = tsc_to_perf_time(tsc, &gs->host_tc);
else
*host_time = tsc;
}
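/*
 * Read the next guest event (header, then payload) from the temporary file
 * into a reusable buffer, parse its sample, and convert its timestamp to
 * host time. A zero-sized header signals EOF.
 */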
static int guest_session__fetch(struct guest_session *gs)
{
void *buf;
struct perf_event_header *hdr;
size_t hdr_sz = sizeof(*hdr);
ssize_t ret;
buf = gs->ev.event_buf;
if (!buf) {
buf = malloc(PERF_SAMPLE_MAX_SIZE);
if (!buf)
return -ENOMEM;
gs->ev.event_buf = buf;
}
hdr = buf;
ret = readn(gs->tmp_fd, buf, hdr_sz);
if (ret < 0)
return ret;
if (!ret) {
/* Zero size means EOF */
hdr->size = 0;
return 0;
}
buf += hdr_sz;
ret = readn(gs->tmp_fd, buf, hdr->size - hdr_sz);
if (ret < 0)
return ret;
gs->ev.event = (union perf_event *)gs->ev.event_buf;
gs->ev.sample.time = 0;
if (hdr->type >= PERF_RECORD_USER_TYPE_START) {
pr_err("Unexpected type fetching guest event");
return 0;
}
ret = evlist__parse_sample(gs->session->evlist, gs->ev.event, &gs->ev.sample);
if (ret) {
pr_err("Parse failed fetching guest event");
return ret;
}
if (!gs->have_tc) {
guest_session__get_tc(gs);
gs->have_tc = true;
}
guest_session__convert_time(gs, gs->ev.sample.time, &gs->ev.sample.time);
return 0;
}
static int evlist__append_id_sample(struct evlist *evlist, union perf_event *ev,
const struct perf_sample *sample)
{
struct evsel *evsel;
void *array;
int ret;
evsel = evlist__id2evsel(evlist, sample->id);
array = ev;
if (!evsel) {
pr_err("No evsel for id %"PRIu64"\n", sample->id);
return -EINVAL;
}
array += ev->header.size;
ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
if (ret < 0)
return ret;
if (ret & 7) {
pr_err("Bad id sample size %d\n", ret);
return -EINVAL;
}
ev->header.size += ret;
return 0;
}
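/*
 * Inject guest events with timestamps up to and including 'timestamp':
 * change the cpumode to its guest equivalent, strip the guest ID sample,
 * remap the sample ID and CPU to their host values, append a new
 * host-compatible ID sample, and write the event out. Event sizes are
 * checked to remain 8-byte aligned at each step.
 */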
static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
{
struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
int ret;
if (!gs->ready)
return 0;
while (1) {
struct perf_sample *sample;
struct guest_id *guest_id;
union perf_event *ev;
u16 id_hdr_size;
u8 cpumode;
u64 id;
if (!gs->fetched) {
ret = guest_session__fetch(gs);
if (ret)
return ret;
gs->fetched = true;
}
ev = gs->ev.event;
sample = &gs->ev.sample;
if (!ev->header.size)
return 0; /* EOF */
if (sample->time > timestamp)
return 0;
/* Change cpumode to guest */
cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
if (cpumode & PERF_RECORD_MISC_USER)
cpumode = PERF_RECORD_MISC_GUEST_USER;
else
cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
ev->header.misc &= ~PERF_RECORD_MISC_CPUMODE_MASK;
ev->header.misc |= cpumode;
id = sample->id;
if (!id) {
id = gs->dflt_id;
id_hdr_size = gs->dflt_id_hdr_size;
} else {
struct evsel *evsel = evlist__id2evsel(gs->session->evlist, id);
id_hdr_size = evsel__id_hdr_size(evsel);
}
if (id_hdr_size & 7) {
pr_err("Bad id_hdr_size %u\n", id_hdr_size);
return -EINVAL;
}
if (ev->header.size & 7) {
pr_err("Bad event size %u\n", ev->header.size);
return -EINVAL;
}
/* Remove guest id sample */
ev->header.size -= id_hdr_size;
if (ev->header.size & 7) {
pr_err("Bad raw event size %u\n", ev->header.size);
return -EINVAL;
}
guest_id = guest_session__lookup_id(gs, id);
if (!guest_id) {
pr_err("Guest event with unknown id %llu\n",
(unsigned long long)id);
return -EINVAL;
}
/* Change to host ID to avoid conflicting ID values */
sample->id = guest_id->host_id;
sample->stream_id = guest_id->host_id;
if (sample->cpu != (u32)-1) {
if (sample->cpu >= gs->vcpu_cnt) {
pr_err("Guest event with unknown VCPU %u\n",
sample->cpu);
return -EINVAL;
}
/* Change to host CPU instead of guest VCPU */
sample->cpu = gs->vcpu[sample->cpu].cpu;
}
/* New id sample with new ID and CPU */
ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
if (ret)
return ret;
if (ev->header.size & 7) {
pr_err("Bad new event size %u\n", ev->header.size);
return -EINVAL;
}
gs->fetched = false;
ret = output_bytes(inject, ev, ev->header.size);
if (ret)
return ret;
}
}
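/* With a u64 timestamp, -1 is the maximum value, so inject everything left */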
static int guest_session__flush_events(struct guest_session *gs)
{
return guest_session__inject_events(gs, -1);
}
static int host__repipe(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
int ret;
ret = guest_session__inject_events(&inject->guest_session, sample->time);
if (ret)
return ret;
return perf_event__repipe(tool, event, sample, machine);
}
static int host__finished_init(struct perf_session *session, union perf_event *event)
{
struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
struct guest_session *gs = &inject->guest_session;
int ret;
/*
* Peek through host COMM events to find QEMU threads and the VCPU they
* are running.
*/
ret = host_peek_vm_comms(session, gs);
if (ret)
return ret;
if (!gs->vcpu_cnt) {
pr_err("No VCPU threads found for pid %u\n", gs->machine_pid);
return -EINVAL;
}
/*
* Allocate new (unused) host sample IDs and map them to the guest IDs.
*/
gs->highest_id = evlist__find_highest_id(session->evlist);
ret = guest_session__map_ids(gs, session->evlist);
if (ret)
return ret;
ret = guest_session__add_attrs(gs);
if (ret)
return ret;
ret = synthesize_id_index(inject, gs->session->evlist->core.nr_entries);
if (ret) {
pr_err("Failed to synthesize id_index\n");
return ret;
}
ret = guest_session__add_build_ids(gs);
if (ret) {
pr_err("Failed to add guest build IDs\n");
return ret;
}
gs->ready = true;
ret = guest_session__inject_events(gs, 0);
if (ret)
return ret;
return perf_event__repipe_op2_synth(session, event);
}
/*
* Obey finished-round ordering. The FINISHED_ROUND event is first processed
* which flushes host events to file up until the last flush time. Then inject
* guest events up to the same time. Finally write out the FINISHED_ROUND event
* itself.
*/
static int host__finished_round(struct perf_tool *tool,
union perf_event *event,
struct ordered_events *oe)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
int ret = perf_event__process_finished_round(tool, event, oe);
u64 timestamp = ordered_events__last_flush_time(oe);
if (ret)
return ret;
ret = guest_session__inject_events(&inject->guest_session, timestamp);
if (ret)
return ret;
return perf_event__repipe_oe_synth(tool, event, oe);
}
static int host__context_switch(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
struct guest_session *gs = &inject->guest_session;
u32 pid = event->context_switch.next_prev_pid;
u32 tid = event->context_switch.next_prev_tid;
struct guest_tid *guest_tid;
u32 vcpu;
if (out || pid != gs->machine_pid)
goto out;
guest_tid = guest_session__lookup_tid(gs, tid);
if (!guest_tid)
goto out;
if (sample->cpu == (u32)-1) {
pr_err("Switch event does not have CPU\n");
return -EINVAL;
}
vcpu = guest_tid->vcpu;
if (vcpu >= gs->vcpu_cnt)
return -EINVAL;
/* Guest is switching in, record which CPU the VCPU is now running on */
gs->vcpu[vcpu].cpu = sample->cpu;
out:
return host__repipe(tool, event, sample, machine);
}
static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
}
static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
struct perf_event_attr *attr = &evsel->core.attr;
const char *name = evsel__name(evsel);
if (!(attr->sample_type & sample_type)) {
pr_err("Samples for %s event do not have %s attribute set.",
name, sample_msg);
return -EINVAL;
}
return 0;
}
static int drop_sample(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct evsel *evsel __maybe_unused,
struct machine *machine __maybe_unused)
{
return 0;
}
static void strip_init(struct perf_inject *inject)
{
struct evlist *evlist = inject->session->evlist;
struct evsel *evsel;
inject->tool.context_switch = perf_event__drop;
evlist__for_each_entry(evlist, evsel)
evsel->handler = drop_sample;
}
static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
{
struct perf_inject *inject = opt->value;
const char *args;
char *dry_run;
if (unset)
return 0;
inject->itrace_synth_opts.set = true;
inject->itrace_synth_opts.vm_time_correlation = true;
inject->in_place_update = true;
if (!str)
return 0;
dry_run = skip_spaces(str);
if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
inject->in_place_update_dry_run = true;
args = dry_run + strlen("dry-run");
} else {
args = str;
}
inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);
return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}
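/*
 * Parse the --guest-data option: a guest perf.data file name and guest
 * machine PID, optionally followed by a guest timestamp offset and scale
 * factor, all comma separated (e.g. "guest.data,12345").
 */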
static int parse_guest_data(const struct option *opt, const char *str, int unset)
{
struct perf_inject *inject = opt->value;
struct guest_session *gs = &inject->guest_session;
char *tok;
char *s;
if (unset)
return 0;
if (!str)
goto bad_args;
s = strdup(str);
if (!s)
return -ENOMEM;
gs->perf_data_file = strsep(&s, ",");
if (!gs->perf_data_file)
goto bad_args;
gs->copy_kcore_dir = has_kcore_dir(gs->perf_data_file);
if (gs->copy_kcore_dir)
inject->output.is_dir = true;
tok = strsep(&s, ",");
if (!tok)
goto bad_args;
gs->machine_pid = strtoul(tok, NULL, 0);
if (!inject->guest_session.machine_pid)
goto bad_args;
gs->time_scale = 1;
tok = strsep(&s, ",");
if (!tok)
goto out;
gs->time_offset = strtoull(tok, NULL, 0);
tok = strsep(&s, ",");
if (!tok)
goto out;
gs->time_scale = strtod(tok, NULL);
if (!gs->time_scale)
goto bad_args;
out:
return 0;
bad_args:
pr_err("--guest-data option requires guest perf.data file name, "
"guest machine PID, and optionally guest timestamp offset, "
"and guest timestamp scale factor, separated by commas.\n");
return -1;
}
static int save_section_info_cb(struct perf_file_section *section,
struct perf_header *ph __maybe_unused,
int feat, int fd __maybe_unused, void *data)
{
struct perf_inject *inject = data;
inject->secs[feat] = *section;
return 0;
}
static int save_section_info(struct perf_inject *inject)
{
struct perf_header *header = &inject->session->header;
int fd = perf_data__fd(inject->session->data);
return perf_header__process_sections(header, fd, inject, save_section_info_cb);
}
static bool keep_feat(int feat)
{
switch (feat) {
/* Keep original information that describes the machine or software */
case HEADER_TRACING_DATA:
case HEADER_HOSTNAME:
case HEADER_OSRELEASE:
case HEADER_VERSION:
case HEADER_ARCH:
case HEADER_NRCPUS:
case HEADER_CPUDESC:
case HEADER_CPUID:
case HEADER_TOTAL_MEM:
case HEADER_CPU_TOPOLOGY:
case HEADER_NUMA_TOPOLOGY:
case HEADER_PMU_MAPPINGS:
case HEADER_CACHE:
case HEADER_MEM_TOPOLOGY:
case HEADER_CLOCKID:
case HEADER_BPF_PROG_INFO:
case HEADER_BPF_BTF:
case HEADER_CPU_PMU_CAPS:
case HEADER_CLOCK_DATA:
case HEADER_HYBRID_TOPOLOGY:
case HEADER_PMU_CAPS:
return true;
/* Information that can be updated */
case HEADER_BUILD_ID:
case HEADER_CMDLINE:
case HEADER_EVENT_DESC:
case HEADER_BRANCH_STACK:
case HEADER_GROUP_DESC:
case HEADER_AUXTRACE:
case HEADER_STAT:
case HEADER_SAMPLE_TIME:
case HEADER_DIR_FORMAT:
case HEADER_COMPRESSED:
default:
return false;
}
}
static int read_file(int fd, u64 offs, void *buf, size_t sz)
{
ssize_t ret = preadn(fd, buf, sz, offs);
if (ret < 0)
return -errno;
if ((size_t)ret != sz)
return -EINVAL;
return 0;
}
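/*
 * Copy a feature section verbatim from the input file, using the offset and
 * size saved earlier by save_section_info(), to the output via the feature
 * writer.
 */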
static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
{
int fd = perf_data__fd(inject->session->data);
u64 offs = inject->secs[feat].offset;
size_t sz = inject->secs[feat].size;
void *buf = malloc(sz);
int ret;
if (!buf)
return -ENOMEM;
ret = read_file(fd, offs, buf, sz);
if (ret)
goto out_free;
ret = fw->write(fw, buf, sz);
out_free:
free(buf);
return ret;
}
struct inject_fc {
struct feat_copier fc;
struct perf_inject *inject;
};
static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
{
struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
struct perf_inject *inject = inj_fc->inject;
int ret;
if (!inject->secs[feat].offset ||
!keep_feat(feat))
return 0;
ret = feat_copy(inject, feat, fw);
if (ret < 0)
return ret;
return 1; /* Feature section copied */
}
static int copy_kcore_dir(struct perf_inject *inject)
{
char *cmd;
int ret;
ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
inject->input_name, inject->output.path);
if (ret < 0)
return ret;
pr_debug("%s\n", cmd);
ret = system(cmd);
free(cmd);
return ret;
}
static int guest_session__copy_kcore_dir(struct guest_session *gs)
{
struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
char *cmd;
int ret;
ret = asprintf(&cmd, "cp -r -n %s/kcore_dir %s/kcore_dir__%u >/dev/null 2>&1",
gs->perf_data_file, inject->output.path, gs->machine_pid);
if (ret < 0)
return ret;
pr_debug("%s\n", cmd);
ret = system(cmd);
free(cmd);
return ret;
}
static int output_fd(struct perf_inject *inject)
{
return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
}
static int __cmd_inject(struct perf_inject *inject)
{
int ret = -EINVAL;
struct guest_session *gs = &inject->guest_session;
struct perf_session *session = inject->session;
int fd = output_fd(inject);
u64 output_data_offset;
signal(SIGINT, sig_handler);
if (inject->build_ids || inject->sched_stat ||
inject->itrace_synth_opts.set || inject->build_id_all) {
inject->tool.mmap = perf_event__repipe_mmap;
inject->tool.mmap2 = perf_event__repipe_mmap2;
inject->tool.fork = perf_event__repipe_fork;
#ifdef HAVE_LIBTRACEEVENT
inject->tool.tracing_data = perf_event__repipe_tracing_data;
#endif
}
output_data_offset = perf_session__data_offset(session->evlist);
if (inject->build_id_all) {
inject->tool.mmap = perf_event__repipe_buildid_mmap;
inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
} else if (inject->build_ids) {
inject->tool.sample = perf_event__inject_buildid;
} else if (inject->sched_stat) {
struct evsel *evsel;
evlist__for_each_entry(session->evlist, evsel) {
const char *name = evsel__name(evsel);
if (!strcmp(name, "sched:sched_switch")) {
if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
return -EINVAL;
evsel->handler = perf_inject__sched_switch;
} else if (!strcmp(name, "sched:sched_process_exit"))
evsel->handler = perf_inject__sched_process_exit;
#ifdef HAVE_LIBTRACEEVENT
else if (!strncmp(name, "sched:sched_stat_", 17))
evsel->handler = perf_inject__sched_stat;
#endif
}
} else if (inject->itrace_synth_opts.vm_time_correlation) {
session->itrace_synth_opts = &inject->itrace_synth_opts;
memset(&inject->tool, 0, sizeof(inject->tool));
inject->tool.id_index = perf_event__process_id_index;
inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
inject->tool.auxtrace = perf_event__process_auxtrace;
inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
inject->tool.ordered_events = true;
inject->tool.ordering_requires_timestamps = true;
} else if (inject->itrace_synth_opts.set) {
session->itrace_synth_opts = &inject->itrace_synth_opts;
inject->itrace_synth_opts.inject = true;
inject->tool.comm = perf_event__repipe_comm;
inject->tool.namespaces = perf_event__repipe_namespaces;
inject->tool.exit = perf_event__repipe_exit;
inject->tool.id_index = perf_event__process_id_index;
inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
inject->tool.auxtrace = perf_event__process_auxtrace;
inject->tool.aux = perf_event__drop_aux;
inject->tool.itrace_start = perf_event__drop_aux;
inject->tool.aux_output_hw_id = perf_event__drop_aux;
inject->tool.ordered_events = true;
inject->tool.ordering_requires_timestamps = true;
/* Allow space in the header for new attributes */
output_data_offset = roundup(8192 + session->header.data_offset, 4096);
if (inject->strip)
strip_init(inject);
} else if (gs->perf_data_file) {
char *name = gs->perf_data_file;
/*
* Not strictly necessary, but keep these events in order wrt
* guest events.
*/
inject->tool.mmap = host__repipe;
inject->tool.mmap2 = host__repipe;
inject->tool.comm = host__repipe;
inject->tool.fork = host__repipe;
inject->tool.exit = host__repipe;
inject->tool.lost = host__repipe;
inject->tool.context_switch = host__repipe;
inject->tool.ksymbol = host__repipe;
inject->tool.text_poke = host__repipe;
/*
* Once the host session has initialized, set up sample ID
* mapping and feed in guest attrs, build IDs and initial
* events.
*/
inject->tool.finished_init = host__finished_init;
/* Obey finished round ordering */
inject->tool.finished_round = host__finished_round;
/* Keep track of which CPU a VCPU is running on */
inject->tool.context_switch = host__context_switch;
/*
* Must order events to be able to obey finished round
* ordering.
*/
inject->tool.ordered_events = true;
inject->tool.ordering_requires_timestamps = true;
/* Set up a separate session to process guest perf.data file */
ret = guest_session__start(gs, name, session->data->force);
if (ret) {
pr_err("Failed to process %s, error %d\n", name, ret);
return ret;
}
/* Allow space in the header for guest attributes */
output_data_offset += gs->session->header.data_offset;
output_data_offset = roundup(output_data_offset, 4096);
}
if (!inject->itrace_synth_opts.set)
auxtrace_index__free(&session->auxtrace_index);
if (!inject->is_pipe && !inject->in_place_update)
lseek(fd, output_data_offset, SEEK_SET);
ret = perf_session__process_events(session);
if (ret)
return ret;
if (gs->session) {
/*
* Remaining guest events have later timestamps. Flush them
* out to file.
*/
ret = guest_session__flush_events(gs);
if (ret) {
pr_err("Failed to flush guest events\n");
return ret;
}
}
if (!inject->is_pipe && !inject->in_place_update) {
struct inject_fc inj_fc = {
.fc.copy = feat_copy_cb,
.inject = inject,
};
if (inject->build_ids)
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
/*
* Keep all buildids when there is unprocessed AUX data because
* it is not known which ones the AUX trace hits.
*/
if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
inject->have_auxtrace && !inject->itrace_synth_opts.set)
dsos__hit_all(session);
/*
* The AUX areas have been removed and replaced with
* synthesized hardware events, so clear the feature flag.
*/
if (inject->itrace_synth_opts.set) {
perf_header__clear_feat(&session->header,
HEADER_AUXTRACE);
if (inject->itrace_synth_opts.last_branch ||
inject->itrace_synth_opts.add_last_branch)
perf_header__set_feat(&session->header,
HEADER_BRANCH_STACK);
}
session->header.data_offset = output_data_offset;
session->header.data_size = inject->bytes_written;
perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc);
if (inject->copy_kcore_dir) {
ret = copy_kcore_dir(inject);
if (ret) {
pr_err("Failed to copy kcore\n");
return ret;
}
}
if (gs->copy_kcore_dir) {
ret = guest_session__copy_kcore_dir(gs);
if (ret) {
pr_err("Failed to copy guest kcore\n");
return ret;
}
}
}
return ret;
}
int cmd_inject(int argc, const char **argv)
{
struct perf_inject inject = {
.tool = {
.sample = perf_event__repipe_sample,
.read = perf_event__repipe_sample,
.mmap = perf_event__repipe,
.mmap2 = perf_event__repipe,
.comm = perf_event__repipe,
.namespaces = perf_event__repipe,
.cgroup = perf_event__repipe,
.fork = perf_event__repipe,
.exit = perf_event__repipe,
.lost = perf_event__repipe,
.lost_samples = perf_event__repipe,
.aux = perf_event__repipe,
.itrace_start = perf_event__repipe,
.aux_output_hw_id = perf_event__repipe,
.context_switch = perf_event__repipe,
.throttle = perf_event__repipe,
.unthrottle = perf_event__repipe,
.ksymbol = perf_event__repipe,
.bpf = perf_event__repipe,
.text_poke = perf_event__repipe,
.attr = perf_event__repipe_attr,
.event_update = perf_event__repipe_event_update,
.tracing_data = perf_event__repipe_op2_synth,
.finished_round = perf_event__repipe_oe_synth,
.build_id = perf_event__repipe_op2_synth,
.id_index = perf_event__repipe_op2_synth,
.auxtrace_info = perf_event__repipe_op2_synth,
.auxtrace_error = perf_event__repipe_op2_synth,
.time_conv = perf_event__repipe_op2_synth,
.thread_map = perf_event__repipe_op2_synth,
.cpu_map = perf_event__repipe_op2_synth,
.stat_config = perf_event__repipe_op2_synth,
.stat = perf_event__repipe_op2_synth,
.stat_round = perf_event__repipe_op2_synth,
.feature = perf_event__repipe_op2_synth,
.finished_init = perf_event__repipe_op2_synth,
.compressed = perf_event__repipe_op4_synth,
.auxtrace = perf_event__repipe_auxtrace,
},
.input_name = "-",
.samples = LIST_HEAD_INIT(inject.samples),
.output = {
.path = "-",
.mode = PERF_DATA_MODE_WRITE,
.use_stdio = true,
},
};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
.use_stdio = true,
};
int ret;
bool repipe = true;
const char *known_build_ids = NULL;
struct option options[] = {
OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
"Inject build-ids into the output stream"),
OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
"Inject build-ids of all DSOs into the output stream"),
OPT_STRING(0, "known-build-ids", &known_build_ids,
"buildid path [,buildid path...]",
"build-ids to use for given paths"),
OPT_STRING('i', "input", &inject.input_name, "file",
"input file name"),
OPT_STRING('o', "output", &inject.output.path, "file",
"output file name"),
OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
"Merge sched-stat and sched-switch for getting events "
"where and how long tasks slept"),
#ifdef HAVE_JITDUMP
OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show build ids, etc)"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
"don't load vmlinux even if found"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
"kallsyms pathname"),
OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
NULL, "opts", "Instruction Tracing options\n"
ITRACE_HELP,
itrace_parse_synth_opts),
OPT_BOOLEAN(0, "strip", &inject.strip,
"strip non-synthesized events (use with --itrace)"),
OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
"correlate time between VM guests and the host",
parse_vm_time_correlation),
OPT_CALLBACK_OPTARG(0, "guest-data", &inject, NULL, "opts",
"inject events from a guest perf.data file",
parse_guest_data),
OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
"guest mount directory under which every guest os"
" instance has a subdir"),
OPT_END()
};
const char * const inject_usage[] = {
"perf inject [<options>]",
NULL
};
#ifndef HAVE_JITDUMP
set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
argc = parse_options(argc, argv, options, inject_usage, 0);
/*
* Any (unrecognized) arguments left?
*/
if (argc)
usage_with_options(inject_usage, options);
if (inject.strip && !inject.itrace_synth_opts.set) {
pr_err("--strip option requires --itrace option\n");
return -1;
}
if (symbol__validate_sym_arguments())
return -1;
if (inject.in_place_update) {
if (!strcmp(inject.input_name, "-")) {
pr_err("Input file name required for in-place updating\n");
return -1;
}
if (strcmp(inject.output.path, "-")) {
pr_err("Output file name must not be specified for in-place updating\n");
return -1;
}
if (!data.force && !inject.in_place_update_dry_run) {
pr_err("The input file would be updated in place, "
"the --force option is required.\n");
return -1;
}
if (!inject.in_place_update_dry_run)
data.in_place_update = true;
} else {
if (strcmp(inject.output.path, "-") && !inject.strip &&
has_kcore_dir(inject.input_name)) {
inject.output.is_dir = true;
inject.copy_kcore_dir = true;
}
if (perf_data__open(&inject.output)) {
perror("failed to create output file");
return -1;
}
}
data.path = inject.input_name;
if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
inject.is_pipe = true;
/*
* Do not repipe header when input is a regular file
* since perf can either rewrite the header at the end
* or write a new pipe header.
*/
if (strcmp(inject.input_name, "-"))
repipe = false;
}
inject.session = __perf_session__new(&data, repipe,
output_fd(&inject),
&inject.tool);
if (IS_ERR(inject.session)) {
ret = PTR_ERR(inject.session);
goto out_close_output;
}
if (zstd_init(&(inject.session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed.\n");
/* Save original section info before feature bits change */
ret = save_section_info(&inject);
if (ret)
goto out_delete;
if (!data.is_pipe && inject.output.is_pipe) {
ret = perf_header__write_pipe(perf_data__fd(&inject.output));
if (ret < 0) {
pr_err("Couldn't write a new pipe header.\n");
goto out_delete;
}
ret = perf_event__synthesize_for_pipe(&inject.tool,
inject.session,
&inject.output,
perf_event__repipe);
if (ret < 0)
goto out_delete;
}
if (inject.build_ids && !inject.build_id_all) {
/*
* to make sure the mmap records are ordered correctly,
* and build-ids resolve correctly, especially for jitted
* code mmaps. We cannot generate the buildid hit list and
* inject the jit mmaps at the same time for now.
*/
inject.tool.ordered_events = true;
inject.tool.ordering_requires_timestamps = true;
if (known_build_ids != NULL) {
inject.known_build_ids =
perf_inject__parse_known_build_ids(known_build_ids);
if (inject.known_build_ids == NULL) {
pr_err("Couldn't parse known build ids.\n");
goto out_delete;
}
}
}
if (inject.sched_stat) {
inject.tool.ordered_events = true;
}
#ifdef HAVE_JITDUMP
if (inject.jit_mode) {
inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
inject.tool.mmap = perf_event__jit_repipe_mmap;
inject.tool.ordered_events = true;
inject.tool.ordering_requires_timestamps = true;
/*
* JIT MMAP injection injects all MMAP events in one go, so it
* does not obey finished_round semantics.
*/
inject.tool.finished_round = perf_event__drop_oe;
}
#endif
ret = symbol__init(&inject.session->header.env);
if (ret < 0)
goto out_delete;
ret = __cmd_inject(&inject);
guest_session__exit(&inject.guest_session);
out_delete:
strlist__delete(inject.known_build_ids);
zstd_fini(&(inject.session->zstd_data));
perf_session__delete(inject.session);
out_close_output:
if (!inject.in_place_update)
perf_data__close(&inject.output);
free(inject.itrace_synth_opts.vm_tm_corr_args);
free(inject.event_copy);
free(inject.guest_session.ev.event_buf);
return ret;
}
| linux-master | tools/perf/builtin-inject.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "builtin.h"
#include "perf.h"
#include "util/evlist.h" // for struct evsel_str_handler
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/target.h"
#include "util/callchain.h"
#include "util/lock-contention.h"
#include "util/bpf_skel/lock_data.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/tracepoint.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
#include "util/string2.h"
#include "util/map.h"
#include "util/util.h"
#include <stdio.h>
#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <math.h>
#include <limits.h>
#include <ctype.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <linux/stringify.h>
static struct perf_session *session;
static struct target target;
/* based on kernel/lockdep.c */
#define LOCKHASH_BITS 12
#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
static struct hlist_head *lockhash_table;
#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
static struct rb_root thread_stats;
static bool combine_locks;
static bool show_thread_stats;
static bool show_lock_addrs;
static bool show_lock_owner;
static bool use_bpf;
static unsigned long bpf_map_entries = MAX_ENTRIES;
static int max_stack_depth = CONTENTION_STACK_DEPTH;
static int stack_skip = CONTENTION_STACK_SKIP;
static int print_nr_entries = INT_MAX / 2;
static LIST_HEAD(callstack_filters);
static const char *output_name = NULL;
static FILE *lock_output;
struct callstack_filter {
struct list_head list;
char name[];
};
static struct lock_filter filters;
static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR;
static bool needs_callstack(void)
{
return !list_empty(&callstack_filters);
}
static struct thread_stat *thread_stat_find(u32 tid)
{
struct rb_node *node;
struct thread_stat *st;
node = thread_stats.rb_node;
while (node) {
st = container_of(node, struct thread_stat, rb);
if (st->tid == tid)
return st;
else if (tid < st->tid)
node = node->rb_left;
else
node = node->rb_right;
}
return NULL;
}
static void thread_stat_insert(struct thread_stat *new)
{
struct rb_node **rb = &thread_stats.rb_node;
struct rb_node *parent = NULL;
struct thread_stat *p;
while (*rb) {
p = container_of(*rb, struct thread_stat, rb);
parent = *rb;
if (new->tid < p->tid)
rb = &(*rb)->rb_left;
else if (new->tid > p->tid)
rb = &(*rb)->rb_right;
else
BUG_ON("inserting invalid thread_stat\n");
}
rb_link_node(&new->rb, parent, rb);
rb_insert_color(&new->rb, &thread_stats);
}
static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
struct thread_stat *st;
st = thread_stat_find(tid);
if (st)
return st;
st = zalloc(sizeof(struct thread_stat));
if (!st) {
pr_err("memory allocation failed\n");
return NULL;
}
st->tid = tid;
INIT_LIST_HEAD(&st->seq_list);
thread_stat_insert(st);
return st;
}
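/*
 * thread_stat_findnew starts out pointing at thread_stat_findnew_first(),
 * which inserts the very first node without searching and then switches the
 * pointer to thread_stat_findnew_after_first() for all later lookups.
 */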
static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
thread_stat_findnew_first;
static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
struct thread_stat *st;
st = zalloc(sizeof(struct thread_stat));
if (!st) {
pr_err("memory allocation failed\n");
return NULL;
}
st->tid = tid;
INIT_LIST_HEAD(&st->seq_list);
rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
rb_insert_color(&st->rb, &thread_stats);
thread_stat_findnew = thread_stat_findnew_after_first;
return st;
}
/* build a simple key function that returns whether one is bigger than two */
#define SINGLE_KEY(member) \
static int lock_stat_key_ ## member(struct lock_stat *one, \
struct lock_stat *two) \
{ \
return one->member > two->member; \
}
SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(avg_wait_time)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)
static int lock_stat_key_wait_time_min(struct lock_stat *one,
struct lock_stat *two)
{
u64 s1 = one->wait_time_min;
u64 s2 = two->wait_time_min;
if (s1 == ULLONG_MAX)
s1 = 0;
if (s2 == ULLONG_MAX)
s2 = 0;
return s1 > s2;
}
struct lock_key {
/*
* name: the value specified by the user
* this should be simpler than the raw name of the member
* e.g. nr_acquired -> acquired, wait_time_total -> wait_total
*/
const char *name;
/* header: the string printed on the header line */
const char *header;
/* len: the printing width of the field */
int len;
/* key: a pointer to function to compare two lock stats for sorting */
int (*key)(struct lock_stat*, struct lock_stat*);
/* print: a pointer to function to print a given lock stats */
void (*print)(struct lock_key*, struct lock_stat*);
/* list: list entry to link this */
struct list_head list;
};
static void lock_stat_key_print_time(unsigned long long nsec, int len)
{
static const struct {
float base;
const char *unit;
} table[] = {
{ 1e9 * 3600, "h " },
{ 1e9 * 60, "m " },
{ 1e9, "s " },
{ 1e6, "ms" },
{ 1e3, "us" },
{ 0, NULL },
};
/* for CSV output */
if (len == 0) {
fprintf(lock_output, "%llu", nsec);
return;
}
for (int i = 0; table[i].unit; i++) {
if (nsec < table[i].base)
continue;
fprintf(lock_output, "%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
return;
}
fprintf(lock_output, "%*llu %s", len - 3, nsec, "ns");
}
#define PRINT_KEY(member) \
static void lock_stat_key_print_ ## member(struct lock_key *key, \
struct lock_stat *ls) \
{ \
fprintf(lock_output, "%*llu", key->len, (unsigned long long)ls->member);\
}
#define PRINT_TIME(member) \
static void lock_stat_key_print_ ## member(struct lock_key *key, \
struct lock_stat *ls) \
{ \
lock_stat_key_print_time((unsigned long long)ls->member, key->len); \
}
PRINT_KEY(nr_acquired)
PRINT_KEY(nr_contended)
PRINT_TIME(avg_wait_time)
PRINT_TIME(wait_time_total)
PRINT_TIME(wait_time_max)
static void lock_stat_key_print_wait_time_min(struct lock_key *key,
struct lock_stat *ls)
{
u64 wait_time = ls->wait_time_min;
if (wait_time == ULLONG_MAX)
wait_time = 0;
lock_stat_key_print_time(wait_time, key->len);
}
static const char *sort_key = "acquired";
static int (*compare)(struct lock_stat *, struct lock_stat *);
static struct rb_root sorted; /* place to store intermediate data */
static struct rb_root result; /* place to store sorted data */
static LIST_HEAD(lock_keys);
static const char *output_fields;
#define DEF_KEY_LOCK(name, header, fn_suffix, len) \
{ #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} }
static struct lock_key report_keys[] = {
DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10),
DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
/* more complicated comparisons should go here */
{ }
};
static struct lock_key contention_keys[] = {
DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
/* more complicated comparisons should go here */
{ }
};
static int select_key(bool contention)
{
int i;
struct lock_key *keys = report_keys;
if (contention)
keys = contention_keys;
for (i = 0; keys[i].name; i++) {
if (!strcmp(keys[i].name, sort_key)) {
compare = keys[i].key;
/* selected key should be in the output fields */
if (list_empty(&keys[i].list))
list_add_tail(&keys[i].list, &lock_keys);
return 0;
}
}
pr_err("Unknown compare key: %s\n", sort_key);
return -1;
}
static int add_output_field(bool contention, char *name)
{
int i;
struct lock_key *keys = report_keys;
if (contention)
keys = contention_keys;
for (i = 0; keys[i].name; i++) {
if (strcmp(keys[i].name, name))
continue;
/* prevent double link */
if (list_empty(&keys[i].list))
list_add_tail(&keys[i].list, &lock_keys);
return 0;
}
pr_err("Unknown output field: %s\n", name);
return -1;
}
static int setup_output_field(bool contention, const char *str)
{
char *tok, *tmp, *orig;
int i, ret = 0;
struct lock_key *keys = report_keys;
if (contention)
keys = contention_keys;
/* no output field given: use all of them */
if (str == NULL) {
for (i = 0; keys[i].name; i++)
list_add_tail(&keys[i].list, &lock_keys);
return 0;
}
for (i = 0; keys[i].name; i++)
INIT_LIST_HEAD(&keys[i].list);
orig = tmp = strdup(str);
if (orig == NULL)
return -ENOMEM;
while ((tok = strsep(&tmp, ",")) != NULL) {
ret = add_output_field(contention, tok);
if (ret < 0)
break;
}
free(orig);
return ret;
}
static void combine_lock_stats(struct lock_stat *st)
{
struct rb_node **rb = &sorted.rb_node;
struct rb_node *parent = NULL;
struct lock_stat *p;
int ret;
while (*rb) {
p = container_of(*rb, struct lock_stat, rb);
parent = *rb;
if (st->name && p->name)
ret = strcmp(st->name, p->name);
else
ret = !!st->name - !!p->name;
if (ret == 0) {
p->nr_acquired += st->nr_acquired;
p->nr_contended += st->nr_contended;
p->wait_time_total += st->wait_time_total;
if (p->nr_contended)
p->avg_wait_time = p->wait_time_total / p->nr_contended;
if (p->wait_time_min > st->wait_time_min)
p->wait_time_min = st->wait_time_min;
if (p->wait_time_max < st->wait_time_max)
p->wait_time_max = st->wait_time_max;
p->broken |= st->broken;
st->combined = 1;
return;
}
if (ret < 0)
rb = &(*rb)->rb_left;
else
rb = &(*rb)->rb_right;
}
rb_link_node(&st->rb, parent, rb);
rb_insert_color(&st->rb, &sorted);
}
static void insert_to_result(struct lock_stat *st,
int (*bigger)(struct lock_stat *, struct lock_stat *))
{
struct rb_node **rb = &result.rb_node;
struct rb_node *parent = NULL;
struct lock_stat *p;
if (combine_locks && st->combined)
return;
while (*rb) {
p = container_of(*rb, struct lock_stat, rb);
parent = *rb;
if (bigger(st, p))
rb = &(*rb)->rb_left;
else
rb = &(*rb)->rb_right;
}
rb_link_node(&st->rb, parent, rb);
rb_insert_color(&st->rb, &result);
}
/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
struct rb_node *node = result.rb_node;
if (!node)
return NULL;
while (node->rb_left)
node = node->rb_left;
rb_erase(node, &result);
return container_of(node, struct lock_stat, rb);
}
struct lock_stat *lock_stat_find(u64 addr)
{
struct hlist_head *entry = lockhashentry(addr);
struct lock_stat *ret;
hlist_for_each_entry(ret, entry, hash_entry) {
if (ret->addr == addr)
return ret;
}
return NULL;
}
struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
{
struct hlist_head *entry = lockhashentry(addr);
struct lock_stat *ret, *new;
hlist_for_each_entry(ret, entry, hash_entry) {
if (ret->addr == addr)
return ret;
}
new = zalloc(sizeof(struct lock_stat));
if (!new)
goto alloc_failed;
new->addr = addr;
new->name = strdup(name);
if (!new->name) {
free(new);
goto alloc_failed;
}
new->flags = flags;
new->wait_time_min = ULLONG_MAX;
hlist_add_head(&new->hash_entry, entry);
return new;
alloc_failed:
pr_err("memory allocation failed\n");
return NULL;
}
bool match_callstack_filter(struct machine *machine, u64 *callstack)
{
struct map *kmap;
struct symbol *sym;
u64 ip;
if (list_empty(&callstack_filters))
return true;
for (int i = 0; i < max_stack_depth; i++) {
struct callstack_filter *filter;
if (!callstack || !callstack[i])
break;
ip = callstack[i];
sym = machine__find_kernel_symbol(machine, ip, &kmap);
if (sym == NULL)
continue;
list_for_each_entry(filter, &callstack_filters, list) {
if (strstr(sym->name, filter->name))
return true;
}
}
return false;
}
struct trace_lock_handler {
/* it's used on CONFIG_LOCKDEP */
int (*acquire_event)(struct evsel *evsel,
struct perf_sample *sample);
/* it's used on CONFIG_LOCKDEP && CONFIG_LOCK_STAT */
int (*acquired_event)(struct evsel *evsel,
struct perf_sample *sample);
/* it's used on CONFIG_LOCKDEP && CONFIG_LOCK_STAT */
int (*contended_event)(struct evsel *evsel,
struct perf_sample *sample);
/* it's used on CONFIG_LOCKDEP */
int (*release_event)(struct evsel *evsel,
struct perf_sample *sample);
/* it's used when CONFIG_LOCKDEP is off */
int (*contention_begin_event)(struct evsel *evsel,
struct perf_sample *sample);
/* it's used when CONFIG_LOCKDEP is off */
int (*contention_end_event)(struct evsel *evsel,
struct perf_sample *sample);
};
static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr)
{
struct lock_seq_stat *seq;
list_for_each_entry(seq, &ts->seq_list, list) {
if (seq->addr == addr)
return seq;
}
seq = zalloc(sizeof(struct lock_seq_stat));
if (!seq) {
pr_err("memory allocation failed\n");
return NULL;
}
seq->state = SEQ_STATE_UNINITIALIZED;
seq->addr = addr;
list_add(&seq->list, &ts->seq_list);
return seq;
}
enum broken_state {
BROKEN_ACQUIRE,
BROKEN_ACQUIRED,
BROKEN_CONTENDED,
BROKEN_RELEASE,
BROKEN_MAX,
};
static int bad_hist[BROKEN_MAX];
enum acquire_flags {
TRY_LOCK = 1,
READ_LOCK = 2,
};
static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
{
switch (aggr_mode) {
case LOCK_AGGR_ADDR:
*key = addr;
break;
case LOCK_AGGR_TASK:
*key = tid;
break;
case LOCK_AGGR_CALLER:
default:
pr_err("Invalid aggregation mode: %d\n", aggr_mode);
return -EINVAL;
}
return 0;
}
static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample);
static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel,
struct perf_sample *sample)
{
if (aggr_mode == LOCK_AGGR_CALLER) {
*key = callchain_id(evsel, sample);
return 0;
}
return get_key_by_aggr_mode_simple(key, addr, sample->tid);
}
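/*
 * The report_lock_*_event() handlers below drive a per-thread, per-lock
 * sequence state machine (struct lock_seq_stat). Events arriving in an
 * impossible order (e.g. acquired without acquiring) mark the lock stat as
 * broken and are counted in bad_hist[].
 */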
static int report_lock_acquire_event(struct evsel *evsel,
struct perf_sample *sample)
{
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
int flag = evsel__intval(evsel, sample, "flags");
u64 key;
int ret;
ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
if (ret < 0)
return ret;
ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
ts = thread_stat_findnew(sample->tid);
if (!ts)
return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
case SEQ_STATE_RELEASED:
if (!flag) {
seq->state = SEQ_STATE_ACQUIRING;
} else {
if (flag & TRY_LOCK)
ls->nr_trylock++;
if (flag & READ_LOCK)
ls->nr_readlock++;
seq->state = SEQ_STATE_READ_ACQUIRED;
seq->read_count = 1;
ls->nr_acquired++;
}
break;
case SEQ_STATE_READ_ACQUIRED:
if (flag & READ_LOCK) {
seq->read_count++;
ls->nr_acquired++;
goto end;
} else {
goto broken;
}
break;
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_ACQUIRING:
case SEQ_STATE_CONTENDED:
broken:
/* broken lock sequence */
if (!ls->broken) {
ls->broken = 1;
bad_hist[BROKEN_ACQUIRE]++;
}
list_del_init(&seq->list);
free(seq);
goto end;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
}
ls->nr_acquire++;
seq->prev_event_time = sample->time;
end:
return 0;
}
static int report_lock_acquired_event(struct evsel *evsel,
struct perf_sample *sample)
{
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
u64 contended_term;
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
u64 key;
int ret;
ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
if (ret < 0)
return ret;
ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
ts = thread_stat_findnew(sample->tid);
if (!ts)
return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
/* orphan event, do nothing */
return 0;
case SEQ_STATE_ACQUIRING:
break;
case SEQ_STATE_CONTENDED:
contended_term = sample->time - seq->prev_event_time;
ls->wait_time_total += contended_term;
if (contended_term < ls->wait_time_min)
ls->wait_time_min = contended_term;
if (ls->wait_time_max < contended_term)
ls->wait_time_max = contended_term;
break;
case SEQ_STATE_RELEASED:
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_READ_ACQUIRED:
/* broken lock sequence */
if (!ls->broken) {
ls->broken = 1;
bad_hist[BROKEN_ACQUIRED]++;
}
list_del_init(&seq->list);
free(seq);
goto end;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
}
seq->state = SEQ_STATE_ACQUIRED;
ls->nr_acquired++;
ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
seq->prev_event_time = sample->time;
end:
return 0;
}
static int report_lock_contended_event(struct evsel *evsel,
struct perf_sample *sample)
{
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
u64 key;
int ret;
ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
if (ret < 0)
return ret;
ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
ts = thread_stat_findnew(sample->tid);
if (!ts)
return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
/* orphan event, do nothing */
return 0;
case SEQ_STATE_ACQUIRING:
break;
case SEQ_STATE_RELEASED:
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_READ_ACQUIRED:
case SEQ_STATE_CONTENDED:
/* broken lock sequence */
if (!ls->broken) {
ls->broken = 1;
bad_hist[BROKEN_CONTENDED]++;
}
list_del_init(&seq->list);
free(seq);
goto end;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
}
seq->state = SEQ_STATE_CONTENDED;
ls->nr_contended++;
ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
seq->prev_event_time = sample->time;
end:
return 0;
}
static int report_lock_release_event(struct evsel *evsel,
struct perf_sample *sample)
{
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
u64 key;
int ret;
ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
if (ret < 0)
return ret;
ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
ts = thread_stat_findnew(sample->tid);
if (!ts)
return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
goto end;
case SEQ_STATE_ACQUIRED:
break;
case SEQ_STATE_READ_ACQUIRED:
seq->read_count--;
BUG_ON(seq->read_count < 0);
if (seq->read_count) {
ls->nr_release++;
goto end;
}
break;
case SEQ_STATE_ACQUIRING:
case SEQ_STATE_CONTENDED:
case SEQ_STATE_RELEASED:
/* broken lock sequence */
if (!ls->broken) {
ls->broken = 1;
bad_hist[BROKEN_RELEASE]++;
}
goto free_seq;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
}
ls->nr_release++;
free_seq:
list_del_init(&seq->list);
free(seq);
end:
return 0;
}
static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
char *buf, int size)
{
u64 offset;
if (map == NULL || sym == NULL) {
buf[0] = '\0';
return 0;
}
offset = map__map_ip(map, ip) - sym->start;
if (offset)
return scnprintf(buf, size, "%s+%#lx", sym->name, offset);
else
return strlcpy(buf, sym->name, size);
}
static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
char *buf, int size)
{
struct thread *thread;
struct callchain_cursor *cursor;
struct machine *machine = &session->machines.host;
struct symbol *sym;
int skip = 0;
int ret;
/* lock names will be replaced with task names later */
if (show_thread_stats)
return -1;
thread = machine__findnew_thread(machine, -1, sample->pid);
if (thread == NULL)
return -1;
cursor = get_tls_callchain_cursor();
/* use caller function name from the callchain */
ret = thread__resolve_callchain(thread, cursor, evsel, sample,
NULL, NULL, max_stack_depth);
if (ret != 0) {
thread__put(thread);
return -1;
}
callchain_cursor_commit(cursor);
thread__put(thread);
while (true) {
struct callchain_cursor_node *node;
node = callchain_cursor_current(cursor);
if (node == NULL)
break;
/* skip first few entries - for lock functions */
if (++skip <= stack_skip)
goto next;
sym = node->ms.sym;
if (sym && !machine__is_lock_function(machine, node->ip)) {
get_symbol_name_offset(node->ms.map, sym, node->ip,
buf, size);
return 0;
}
next:
callchain_cursor_advance(cursor);
}
return -1;
}
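/*
 * Build an aggregation key for LOCK_AGGR_CALLER mode by hashing the
 * callchain IPs, skipping the first stack_skip entries and known lock
 * functions so that different call sites of the same lock primitive get
 * distinct keys.
 */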
static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
{
struct callchain_cursor *cursor;
struct machine *machine = &session->machines.host;
struct thread *thread;
u64 hash = 0;
int skip = 0;
int ret;
thread = machine__findnew_thread(machine, -1, sample->pid);
if (thread == NULL)
return -1;
cursor = get_tls_callchain_cursor();
/* use caller function name from the callchain */
ret = thread__resolve_callchain(thread, cursor, evsel, sample,
NULL, NULL, max_stack_depth);
thread__put(thread);
if (ret != 0)
return -1;
callchain_cursor_commit(cursor);
while (true) {
struct callchain_cursor_node *node;
node = callchain_cursor_current(cursor);
if (node == NULL)
break;
/* skip first few entries - for lock functions */
if (++skip <= stack_skip)
goto next;
if (node->ms.sym && machine__is_lock_function(machine, node->ip))
goto next;
hash ^= hash_long((unsigned long)node->ip, 64);
next:
callchain_cursor_advance(cursor);
}
return hash;
}
static u64 *get_callstack(struct perf_sample *sample, int max_stack)
{
u64 *callstack;
u64 i;
int c;
callstack = calloc(max_stack, sizeof(*callstack));
if (callstack == NULL)
return NULL;
for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) {
u64 ip = sample->callchain->ips[i];
if (ip >= PERF_CONTEXT_MAX)
continue;
callstack[c++] = ip;
}
return callstack;
}
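/*
 * Handle a contention_begin tracepoint: lazily resolve symbol filters to
 * kernel addresses on first use, find or create the lock_stat for the
 * aggregation key, apply the type/address/callstack filters, and then mark
 * the per-thread lock sequence as contended.
 */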
static int report_lock_contention_begin_event(struct evsel *evsel,
struct perf_sample *sample)
{
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
u64 addr = evsel__intval(evsel, sample, "lock_addr");
unsigned int flags = evsel__intval(evsel, sample, "flags");
u64 key;
int i, ret;
static bool kmap_loaded;
struct machine *machine = &session->machines.host;
struct map *kmap;
struct symbol *sym;
ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
if (ret < 0)
return ret;
if (!kmap_loaded) {
unsigned long *addrs;
/* make sure it loads the kernel map to find lock symbols */
map__load(machine__kernel_map(machine));
kmap_loaded = true;
/* convert (kernel) symbols to addresses */
for (i = 0; i < filters.nr_syms; i++) {
sym = machine__find_kernel_symbol_by_name(machine,
filters.syms[i],
&kmap);
if (sym == NULL) {
pr_warning("ignore unknown symbol: %s\n",
filters.syms[i]);
continue;
}
addrs = realloc(filters.addrs,
(filters.nr_addrs + 1) * sizeof(*addrs));
if (addrs == NULL) {
pr_warning("memory allocation failure\n");
return -ENOMEM;
}
addrs[filters.nr_addrs++] = map__unmap_ip(kmap, sym->start);
filters.addrs = addrs;
}
}
ls = lock_stat_find(key);
if (!ls) {
char buf[128];
const char *name = "";
switch (aggr_mode) {
case LOCK_AGGR_ADDR:
sym = machine__find_kernel_symbol(machine, key, &kmap);
if (sym)
name = sym->name;
break;
case LOCK_AGGR_CALLER:
name = buf;
if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
name = "Unknown";
break;
case LOCK_AGGR_TASK:
default:
break;
}
ls = lock_stat_findnew(key, name, flags);
if (!ls)
return -ENOMEM;
}
if (filters.nr_types) {
bool found = false;
for (i = 0; i < filters.nr_types; i++) {
if (flags == filters.types[i]) {
found = true;
break;
}
}
if (!found)
return 0;
}
if (filters.nr_addrs) {
bool found = false;
for (i = 0; i < filters.nr_addrs; i++) {
if (addr == filters.addrs[i]) {
found = true;
break;
}
}
if (!found)
return 0;
}
if (needs_callstack()) {
u64 *callstack = get_callstack(sample, max_stack_depth);
if (callstack == NULL)
return -ENOMEM;
if (!match_callstack_filter(machine, callstack)) {
free(callstack);
return 0;
}
if (ls->callstack == NULL)
ls->callstack = callstack;
else
free(callstack);
}
ts = thread_stat_findnew(sample->tid);
if (!ts)
return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
case SEQ_STATE_ACQUIRED:
break;
case SEQ_STATE_CONTENDED:
/*
* Contention-begin events can nest when a mutex spins before
* sleeping; keep the original contention-begin event and
* ignore the second one.
*/
goto end;
case SEQ_STATE_ACQUIRING:
case SEQ_STATE_READ_ACQUIRED:
case SEQ_STATE_RELEASED:
/* broken lock sequence */
if (!ls->broken) {
ls->broken = 1;
bad_hist[BROKEN_CONTENDED]++;
}
list_del_init(&seq->list);
free(seq);
goto end;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
}
if (seq->state != SEQ_STATE_CONTENDED) {
seq->state = SEQ_STATE_CONTENDED;
seq->prev_event_time = sample->time;
ls->nr_contended++;
}
end:
return 0;
}
static int report_lock_contention_end_event(struct evsel *evsel,
struct perf_sample *sample)
{
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
u64 contended_term;
u64 addr = evsel__intval(evsel, sample, "lock_addr");
u64 key;
int ret;
ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
if (ret < 0)
return ret;
ls = lock_stat_find(key);
if (!ls)
return 0;
ts = thread_stat_find(sample->tid);
if (!ts)
return 0;
seq = get_seq(ts, addr);
if (!seq)
return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
goto end;
case SEQ_STATE_CONTENDED:
contended_term = sample->time - seq->prev_event_time;
ls->wait_time_total += contended_term;
if (contended_term < ls->wait_time_min)
ls->wait_time_min = contended_term;
if (ls->wait_time_max < contended_term)
ls->wait_time_max = contended_term;
break;
case SEQ_STATE_ACQUIRING:
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_READ_ACQUIRED:
case SEQ_STATE_RELEASED:
/* broken lock sequence */
if (!ls->broken) {
ls->broken = 1;
bad_hist[BROKEN_ACQUIRED]++;
}
list_del_init(&seq->list);
free(seq);
goto end;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
}
seq->state = SEQ_STATE_ACQUIRED;
ls->nr_acquired++;
ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired;
end:
return 0;
}
/* lock-oriented handlers */
/* TODO: handlers for CPU-oriented, thread-oriented views */
static struct trace_lock_handler report_lock_ops = {
.acquire_event = report_lock_acquire_event,
.acquired_event = report_lock_acquired_event,
.contended_event = report_lock_contended_event,
.release_event = report_lock_release_event,
.contention_begin_event = report_lock_contention_begin_event,
.contention_end_event = report_lock_contention_end_event,
};
static struct trace_lock_handler contention_lock_ops = {
.contention_begin_event = report_lock_contention_begin_event,
.contention_end_event = report_lock_contention_end_event,
};
static struct trace_lock_handler *trace_handler;
static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->acquire_event)
return trace_handler->acquire_event(evsel, sample);
return 0;
}
static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->acquired_event)
return trace_handler->acquired_event(evsel, sample);
return 0;
}
static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->contended_event)
return trace_handler->contended_event(evsel, sample);
return 0;
}
static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->release_event)
return trace_handler->release_event(evsel, sample);
return 0;
}
static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->contention_begin_event)
return trace_handler->contention_begin_event(evsel, sample);
return 0;
}
static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->contention_end_event)
return trace_handler->contention_end_event(evsel, sample);
return 0;
}
static void print_bad_events(int bad, int total)
{
/* Debug output; this has to be removed eventually */
int i;
int broken = 0;
const char *name[4] =
{ "acquire", "acquired", "contended", "release" };
for (i = 0; i < BROKEN_MAX; i++)
broken += bad_hist[i];
if (quiet || total == 0 || (broken == 0 && verbose <= 0))
return;
fprintf(lock_output, "\n=== output for debug ===\n\n");
fprintf(lock_output, "bad: %d, total: %d\n", bad, total);
fprintf(lock_output, "bad rate: %.2f %%\n", (double)bad / (double)total * 100);
fprintf(lock_output, "histogram of events caused bad sequence\n");
for (i = 0; i < BROKEN_MAX; i++)
fprintf(lock_output, " %10s: %d\n", name[i], bad_hist[i]);
}
/* TODO: various ways to print, coloring, nano- or millisecond units */
static void print_result(void)
{
struct lock_stat *st;
struct lock_key *key;
char cut_name[20];
int bad, total, printed;
if (!quiet) {
fprintf(lock_output, "%20s ", "Name");
list_for_each_entry(key, &lock_keys, list)
fprintf(lock_output, "%*s ", key->len, key->header);
fprintf(lock_output, "\n\n");
}
bad = total = printed = 0;
while ((st = pop_from_result())) {
total++;
if (st->broken)
bad++;
if (!st->nr_acquired)
continue;
bzero(cut_name, 20);
if (strlen(st->name) < 20) {
/* output raw name */
const char *name = st->name;
if (show_thread_stats) {
struct thread *t;
/* st->addr contains tid of thread */
t = perf_session__findnew(session, st->addr);
name = thread__comm_str(t);
}
fprintf(lock_output, "%20s ", name);
} else {
strncpy(cut_name, st->name, 16);
cut_name[16] = '.';
cut_name[17] = '.';
cut_name[18] = '.';
cut_name[19] = '\0';
/* truncate the name to keep the output columns aligned */
fprintf(lock_output, "%20s ", cut_name);
}
list_for_each_entry(key, &lock_keys, list) {
key->print(key, st);
fprintf(lock_output, " ");
}
fprintf(lock_output, "\n");
if (++printed >= print_nr_entries)
break;
}
print_bad_events(bad, total);
}
static bool info_threads, info_map;
static void dump_threads(void)
{
struct thread_stat *st;
struct rb_node *node;
struct thread *t;
fprintf(lock_output, "%10s: comm\n", "Thread ID");
node = rb_first(&thread_stats);
while (node) {
st = container_of(node, struct thread_stat, rb);
t = perf_session__findnew(session, st->tid);
fprintf(lock_output, "%10d: %s\n", st->tid, thread__comm_str(t));
node = rb_next(node);
thread__put(t);
}
}
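/* Sort lock instances by class name first, then by address. */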
static int compare_maps(struct lock_stat *a, struct lock_stat *b)
{
int ret;
if (a->name && b->name)
ret = strcmp(a->name, b->name);
else
ret = !!a->name - !!b->name;
if (!ret)
return a->addr < b->addr;
else
return ret < 0;
}
static void dump_map(void)
{
unsigned int i;
struct lock_stat *st;
fprintf(lock_output, "Address of instance: name of class\n");
for (i = 0; i < LOCKHASH_SIZE; i++) {
hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
insert_to_result(st, compare_maps);
}
}
while ((st = pop_from_result()))
fprintf(lock_output, " %#llx: %s\n", (unsigned long long)st->addr, st->name);
}
static int dump_info(void)
{
int rc = 0;
if (info_threads)
dump_threads();
else if (info_map)
dump_map();
else {
rc = -1;
pr_err("Unknown type of information\n");
}
return rc;
}
static const struct evsel_str_handler lock_tracepoints[] = {
{ "lock:lock_acquire", evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
{ "lock:lock_acquired", evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
{ "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
{ "lock:lock_release", evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
};
static const struct evsel_str_handler contention_tracepoints[] = {
{ "lock:contention_begin", evsel__process_contention_begin, },
{ "lock:contention_end", evsel__process_contention_end, },
};
static int process_event_update(struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist)
{
int ret;
ret = perf_event__process_event_update(tool, event, pevlist);
if (ret < 0)
return ret;
/* this can return -EEXIST since we call it for each evsel */
perf_session__set_tracepoints_handlers(session, lock_tracepoints);
perf_session__set_tracepoints_handlers(session, contention_tracepoints);
return 0;
}
typedef int (*tracepoint_handler)(struct evsel *evsel,
struct perf_sample *sample);
static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
int err = 0;
struct thread *thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
err = f(evsel, sample);
}
thread__put(thread);
return err;
}
static void combine_result(void)
{
unsigned int i;
struct lock_stat *st;
if (!combine_locks)
return;
for (i = 0; i < LOCKHASH_SIZE; i++) {
hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
combine_lock_stats(st);
}
}
}
static void sort_result(void)
{
unsigned int i;
struct lock_stat *st;
for (i = 0; i < LOCKHASH_SIZE; i++) {
hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
insert_to_result(st, compare);
}
}
}
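/*
 * Map the LCB_F_* contention flags to the short string shown in the
 * output (str) and to the filter name accepted by -Y/--type-filter
 * (name).
 */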
static const struct {
unsigned int flags;
const char *str;
const char *name;
} lock_type_table[] = {
{ 0, "semaphore", "semaphore" },
{ LCB_F_SPIN, "spinlock", "spinlock" },
{ LCB_F_SPIN | LCB_F_READ, "rwlock:R", "rwlock" },
{ LCB_F_SPIN | LCB_F_WRITE, "rwlock:W", "rwlock" },
{ LCB_F_READ, "rwsem:R", "rwsem" },
{ LCB_F_WRITE, "rwsem:W", "rwsem" },
{ LCB_F_RT, "rt-mutex", "rt-mutex" },
{ LCB_F_RT | LCB_F_READ, "rwlock-rt:R", "rwlock-rt" },
{ LCB_F_RT | LCB_F_WRITE, "rwlock-rt:W", "rwlock-rt" },
{ LCB_F_PERCPU | LCB_F_READ, "pcpu-sem:R", "percpu-rwsem" },
{ LCB_F_PERCPU | LCB_F_WRITE, "pcpu-sem:W", "percpu-rwsem" },
{ LCB_F_MUTEX, "mutex", "mutex" },
{ LCB_F_MUTEX | LCB_F_SPIN, "mutex", "mutex" },
/* alias for get_type_flag() */
{ LCB_F_MUTEX | LCB_F_SPIN, "mutex-spin", "mutex" },
};
static const char *get_type_str(unsigned int flags)
{
flags &= LCB_F_MAX_FLAGS - 1;
for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
if (lock_type_table[i].flags == flags)
return lock_type_table[i].str;
}
return "unknown";
}
static const char *get_type_name(unsigned int flags)
{
flags &= LCB_F_MAX_FLAGS - 1;
for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
if (lock_type_table[i].flags == flags)
return lock_type_table[i].name;
}
return "unknown";
}
static unsigned int get_type_flag(const char *str)
{
for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
if (!strcmp(lock_type_table[i].name, str))
return lock_type_table[i].flags;
}
for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
if (!strcmp(lock_type_table[i].str, str))
return lock_type_table[i].flags;
}
return UINT_MAX;
}
static void lock_filter_finish(void)
{
zfree(&filters.types);
filters.nr_types = 0;
zfree(&filters.addrs);
filters.nr_addrs = 0;
for (int i = 0; i < filters.nr_syms; i++)
free(filters.syms[i]);
zfree(&filters.syms);
filters.nr_syms = 0;
}
static void sort_contention_result(void)
{
sort_result();
}
static void print_header_stdio(void)
{
struct lock_key *key;
list_for_each_entry(key, &lock_keys, list)
fprintf(lock_output, "%*s ", key->len, key->header);
switch (aggr_mode) {
case LOCK_AGGR_TASK:
fprintf(lock_output, " %10s %s\n\n", "pid",
show_lock_owner ? "owner" : "comm");
break;
case LOCK_AGGR_CALLER:
fprintf(lock_output, " %10s %s\n\n", "type", "caller");
break;
case LOCK_AGGR_ADDR:
fprintf(lock_output, " %16s %s\n\n", "address", "symbol");
break;
default:
break;
}
}
static void print_header_csv(const char *sep)
{
struct lock_key *key;
fprintf(lock_output, "# output: ");
list_for_each_entry(key, &lock_keys, list)
fprintf(lock_output, "%s%s ", key->header, sep);
switch (aggr_mode) {
case LOCK_AGGR_TASK:
fprintf(lock_output, "%s%s %s\n", "pid", sep,
show_lock_owner ? "owner" : "comm");
break;
case LOCK_AGGR_CALLER:
fprintf(lock_output, "%s%s %s", "type", sep, "caller");
if (verbose > 0)
fprintf(lock_output, "%s %s", sep, "stacktrace");
fprintf(lock_output, "\n");
break;
case LOCK_AGGR_ADDR:
fprintf(lock_output, "%s%s %s%s %s\n", "address", sep, "symbol", sep, "type");
break;
default:
break;
}
}
static void print_header(void)
{
if (!quiet) {
if (symbol_conf.field_sep)
print_header_csv(symbol_conf.field_sep);
else
print_header_stdio();
}
}
static void print_lock_stat_stdio(struct lock_contention *con, struct lock_stat *st)
{
struct lock_key *key;
struct thread *t;
int pid;
list_for_each_entry(key, &lock_keys, list) {
key->print(key, st);
fprintf(lock_output, " ");
}
switch (aggr_mode) {
case LOCK_AGGR_CALLER:
fprintf(lock_output, " %10s %s\n", get_type_str(st->flags), st->name);
break;
case LOCK_AGGR_TASK:
pid = st->addr;
t = perf_session__findnew(session, pid);
fprintf(lock_output, " %10d %s\n",
pid, pid == -1 ? "Unknown" : thread__comm_str(t));
break;
case LOCK_AGGR_ADDR:
fprintf(lock_output, " %016llx %s (%s)\n", (unsigned long long)st->addr,
st->name, get_type_name(st->flags));
break;
default:
break;
}
if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
struct map *kmap;
struct symbol *sym;
char buf[128];
u64 ip;
for (int i = 0; i < max_stack_depth; i++) {
if (!st->callstack || !st->callstack[i])
break;
ip = st->callstack[i];
sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
fprintf(lock_output, "\t\t\t%#lx %s\n", (unsigned long)ip, buf);
}
}
}
static void print_lock_stat_csv(struct lock_contention *con, struct lock_stat *st,
const char *sep)
{
struct lock_key *key;
struct thread *t;
int pid;
list_for_each_entry(key, &lock_keys, list) {
key->print(key, st);
fprintf(lock_output, "%s ", sep);
}
switch (aggr_mode) {
case LOCK_AGGR_CALLER:
fprintf(lock_output, "%s%s %s", get_type_str(st->flags), sep, st->name);
if (verbose <= 0)
fprintf(lock_output, "\n");
break;
case LOCK_AGGR_TASK:
pid = st->addr;
t = perf_session__findnew(session, pid);
fprintf(lock_output, "%d%s %s\n", pid, sep,
pid == -1 ? "Unknown" : thread__comm_str(t));
break;
case LOCK_AGGR_ADDR:
fprintf(lock_output, "%llx%s %s%s %s\n", (unsigned long long)st->addr, sep,
st->name, sep, get_type_name(st->flags));
break;
default:
break;
}
if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
struct map *kmap;
struct symbol *sym;
char buf[128];
u64 ip;
for (int i = 0; i < max_stack_depth; i++) {
if (!st->callstack || !st->callstack[i])
break;
ip = st->callstack[i];
sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
fprintf(lock_output, "%s %#lx %s", i ? ":" : sep, (unsigned long) ip, buf);
}
fprintf(lock_output, "\n");
}
}
static void print_lock_stat(struct lock_contention *con, struct lock_stat *st)
{
if (symbol_conf.field_sep)
print_lock_stat_csv(con, st, symbol_conf.field_sep);
else
print_lock_stat_stdio(con, st);
}
static void print_footer_stdio(int total, int bad, struct lock_contention_fails *fails)
{
/* Debug output; this has to be removed eventually */
int broken = fails->task + fails->stack + fails->time + fails->data;
if (!use_bpf)
print_bad_events(bad, total);
if (quiet || total == 0 || (broken == 0 && verbose <= 0))
return;
total += broken;
fprintf(lock_output, "\n=== output for debug ===\n\n");
fprintf(lock_output, "bad: %d, total: %d\n", broken, total);
fprintf(lock_output, "bad rate: %.2f %%\n", 100.0 * broken / total);
fprintf(lock_output, "histogram of failure reasons\n");
fprintf(lock_output, " %10s: %d\n", "task", fails->task);
fprintf(lock_output, " %10s: %d\n", "stack", fails->stack);
fprintf(lock_output, " %10s: %d\n", "time", fails->time);
fprintf(lock_output, " %10s: %d\n", "data", fails->data);
}
static void print_footer_csv(int total, int bad, struct lock_contention_fails *fails,
const char *sep)
{
/* Debug output; this has to be removed eventually */
if (use_bpf)
bad = fails->task + fails->stack + fails->time + fails->data;
if (quiet || total == 0 || (bad == 0 && verbose <= 0))
return;
total += bad;
fprintf(lock_output, "# debug: total=%d%s bad=%d", total, sep, bad);
if (use_bpf) {
fprintf(lock_output, "%s bad_%s=%d", sep, "task", fails->task);
fprintf(lock_output, "%s bad_%s=%d", sep, "stack", fails->stack);
fprintf(lock_output, "%s bad_%s=%d", sep, "time", fails->time);
fprintf(lock_output, "%s bad_%s=%d", sep, "data", fails->data);
} else {
int i;
const char *name[4] = { "acquire", "acquired", "contended", "release" };
for (i = 0; i < BROKEN_MAX; i++)
fprintf(lock_output, "%s bad_%s=%d", sep, name[i], bad_hist[i]);
}
fprintf(lock_output, "\n");
}
static void print_footer(int total, int bad, struct lock_contention_fails *fails)
{
if (symbol_conf.field_sep)
print_footer_csv(total, bad, fails, symbol_conf.field_sep);
else
print_footer_stdio(total, bad, fails);
}
static void print_contention_result(struct lock_contention *con)
{
struct lock_stat *st;
int bad, total, printed;
if (!quiet)
print_header();
bad = total = printed = 0;
while ((st = pop_from_result())) {
total += use_bpf ? st->nr_contended : 1;
if (st->broken)
bad++;
if (!st->wait_time_total)
continue;
print_lock_stat(con, st);
if (++printed >= print_nr_entries)
break;
}
if (print_nr_entries) {
/* update the total/bad stats */
while ((st = pop_from_result())) {
total += use_bpf ? st->nr_contended : 1;
if (st->broken)
bad++;
}
}
/* some entries are collected but hidden by the callstack filter */
total += con->nr_filtered;
print_footer(total, bad, &con->fails);
}
static bool force;
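/*
 * Process a recorded perf.data file and print either the lock
 * statistics or, for 'perf lock info', the thread/map tables.
 */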
static int __cmd_report(bool display_info)
{
int err = -EINVAL;
struct perf_tool eops = {
.attr = perf_event__process_attr,
.event_update = process_event_update,
.sample = process_sample_event,
.comm = perf_event__process_comm,
.mmap = perf_event__process_mmap,
.namespaces = perf_event__process_namespaces,
.tracing_data = perf_event__process_tracing_data,
.ordered_events = true,
};
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = force,
};
session = perf_session__new(&data, &eops);
if (IS_ERR(session)) {
pr_err("Initializing perf session failed\n");
return PTR_ERR(session);
}
symbol_conf.allow_aliases = true;
symbol__init(&session->header.env);
if (!data.is_pipe) {
if (!perf_session__has_traces(session, "lock record"))
goto out_delete;
if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
pr_err("Initializing perf session tracepoint handlers failed\n");
goto out_delete;
}
if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
pr_err("Initializing perf session tracepoint handlers failed\n");
goto out_delete;
}
}
if (setup_output_field(false, output_fields))
goto out_delete;
if (select_key(false))
goto out_delete;
if (show_thread_stats)
aggr_mode = LOCK_AGGR_TASK;
err = perf_session__process_events(session);
if (err)
goto out_delete;
setup_pager();
if (display_info) /* used for info subcommand */
err = dump_info();
else {
combine_result();
sort_result();
print_result();
}
out_delete:
perf_session__delete(session);
return err;
}
static void sighandler(int sig __maybe_unused)
{
}
static int check_lock_contention_options(const struct option *options,
const char * const *usage)
{
if (show_thread_stats && show_lock_addrs) {
pr_err("Cannot use thread and addr mode together\n");
parse_options_usage(usage, options, "threads", 0);
parse_options_usage(NULL, options, "lock-addr", 0);
return -1;
}
if (show_lock_owner && !use_bpf) {
pr_err("Lock owners are available only with BPF\n");
parse_options_usage(usage, options, "lock-owner", 0);
parse_options_usage(NULL, options, "use-bpf", 0);
return -1;
}
if (show_lock_owner && show_lock_addrs) {
pr_err("Cannot use owner and addr mode together\n");
parse_options_usage(usage, options, "lock-owner", 0);
parse_options_usage(NULL, options, "lock-addr", 0);
return -1;
}
if (symbol_conf.field_sep) {
if (strstr(symbol_conf.field_sep, ":") || /* part of type flags */
strstr(symbol_conf.field_sep, "+") || /* part of caller offset */
strstr(symbol_conf.field_sep, ".")) { /* can be in a symbol name */
pr_err("Cannot use the separator that is already used\n");
parse_options_usage(usage, options, "x", 1);
return -1;
}
}
if (show_lock_owner)
show_thread_stats = true;
return 0;
}
static int __cmd_contention(int argc, const char **argv)
{
int err = -EINVAL;
struct perf_tool eops = {
.attr = perf_event__process_attr,
.event_update = process_event_update,
.sample = process_sample_event,
.comm = perf_event__process_comm,
.mmap = perf_event__process_mmap,
.tracing_data = perf_event__process_tracing_data,
.ordered_events = true,
};
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = force,
};
struct lock_contention con = {
.target = &target,
.map_nr_entries = bpf_map_entries,
.max_stack = max_stack_depth,
.stack_skip = stack_skip,
.filters = &filters,
.save_callstack = needs_callstack(),
.owner = show_lock_owner,
};
lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table));
if (!lockhash_table)
return -ENOMEM;
con.result = &lockhash_table[0];
session = perf_session__new(use_bpf ? NULL : &data, &eops);
if (IS_ERR(session)) {
pr_err("Initializing perf session failed\n");
err = PTR_ERR(session);
session = NULL;
goto out_delete;
}
con.machine = &session->machines.host;
con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK :
show_lock_addrs ? LOCK_AGGR_ADDR : LOCK_AGGR_CALLER;
if (con.aggr_mode == LOCK_AGGR_CALLER)
con.save_callstack = true;
symbol_conf.allow_aliases = true;
symbol__init(&session->header.env);
if (use_bpf) {
err = target__validate(&target);
if (err) {
char errbuf[512];
target__strerror(&target, err, errbuf, 512);
pr_err("%s\n", errbuf);
goto out_delete;
}
signal(SIGINT, sighandler);
signal(SIGCHLD, sighandler);
signal(SIGTERM, sighandler);
con.evlist = evlist__new();
if (con.evlist == NULL) {
err = -ENOMEM;
goto out_delete;
}
err = evlist__create_maps(con.evlist, &target);
if (err < 0)
goto out_delete;
if (argc) {
err = evlist__prepare_workload(con.evlist, &target,
argv, false, NULL);
if (err < 0)
goto out_delete;
}
if (lock_contention_prepare(&con) < 0) {
pr_err("lock contention BPF setup failed\n");
goto out_delete;
}
} else if (!data.is_pipe) {
if (!perf_session__has_traces(session, "lock record"))
goto out_delete;
if (!evlist__find_evsel_by_str(session->evlist,
"lock:contention_begin")) {
pr_err("lock contention evsel not found\n");
goto out_delete;
}
if (perf_session__set_tracepoints_handlers(session,
contention_tracepoints)) {
pr_err("Initializing perf session tracepoint handlers failed\n");
goto out_delete;
}
}
if (setup_output_field(true, output_fields))
goto out_delete;
if (select_key(true))
goto out_delete;
if (symbol_conf.field_sep) {
int i;
struct lock_key *keys = contention_keys;
/* do not align output in CSV format */
for (i = 0; keys[i].name; i++)
keys[i].len = 0;
}
if (use_bpf) {
lock_contention_start();
if (argc)
evlist__start_workload(con.evlist);
/* wait for signal */
pause();
lock_contention_stop();
lock_contention_read(&con);
} else {
err = perf_session__process_events(session);
if (err)
goto out_delete;
}
setup_pager();
sort_contention_result();
print_contention_result(&con);
out_delete:
lock_filter_finish();
evlist__delete(con.evlist);
lock_contention_finish();
perf_session__delete(session);
zfree(&lockhash_table);
return err;
}
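/*
 * Build an argv for 'perf record' using the lockdep/lock_stat
 * tracepoints when available, falling back to the generic contention
 * tracepoints (plus callgraph recording) otherwise.
 */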
static int __cmd_record(int argc, const char **argv)
{
const char *record_args[] = {
"record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
};
const char *callgraph_args[] = {
"--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH),
};
unsigned int rec_argc, i, j, ret;
unsigned int nr_tracepoints;
unsigned int nr_callgraph_args = 0;
const char **rec_argv;
bool has_lock_stat = true;
for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
pr_debug("tracepoint %s is not enabled. "
"Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
lock_tracepoints[i].name);
has_lock_stat = false;
break;
}
}
if (has_lock_stat)
goto setup_args;
for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) {
if (!is_valid_tracepoint(contention_tracepoints[i].name)) {
pr_err("tracepoint %s is not enabled.\n",
contention_tracepoints[i].name);
return 1;
}
}
nr_callgraph_args = ARRAY_SIZE(callgraph_args);
setup_args:
rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1;
if (has_lock_stat)
nr_tracepoints = ARRAY_SIZE(lock_tracepoints);
else
nr_tracepoints = ARRAY_SIZE(contention_tracepoints);
/* factor of 2 is for -e in front of each tracepoint */
rec_argc += 2 * nr_tracepoints;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
for (j = 0; j < nr_tracepoints; j++) {
const char *ev_name;
if (has_lock_stat)
ev_name = strdup(lock_tracepoints[j].name);
else
ev_name = strdup(contention_tracepoints[j].name);
if (!ev_name)
return -ENOMEM;
rec_argv[i++] = "-e";
rec_argv[i++] = ev_name;
}
for (j = 0; j < nr_callgraph_args; j++, i++)
rec_argv[i] = callgraph_args[j];
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
BUG_ON(i != rec_argc);
ret = cmd_record(i, rec_argv);
free(rec_argv);
return ret;
}
static int parse_map_entry(const struct option *opt, const char *str,
int unset __maybe_unused)
{
unsigned long *len = (unsigned long *)opt->value;
unsigned long val;
char *endptr;
errno = 0;
val = strtoul(str, &endptr, 0);
if (*endptr != '\0' || errno != 0) {
pr_err("invalid BPF map length: %s\n", str);
return -1;
}
*len = val;
return 0;
}
static int parse_max_stack(const struct option *opt, const char *str,
int unset __maybe_unused)
{
unsigned long *len = (unsigned long *)opt->value;
long val;
char *endptr;
errno = 0;
val = strtol(str, &endptr, 0);
if (*endptr != '\0' || errno != 0) {
pr_err("invalid max stack depth: %s\n", str);
return -1;
}
if (val < 0 || val > sysctl__max_stack()) {
pr_err("invalid max stack depth: %ld\n", val);
return -1;
}
*len = val;
return 0;
}
static bool add_lock_type(unsigned int flags)
{
unsigned int *tmp;
tmp = realloc(filters.types, (filters.nr_types + 1) * sizeof(*filters.types));
if (tmp == NULL)
return false;
tmp[filters.nr_types++] = flags;
filters.types = tmp;
return true;
}
static int parse_lock_type(const struct option *opt __maybe_unused, const char *str,
int unset __maybe_unused)
{
char *s, *tmp, *tok;
int ret = 0;
s = strdup(str);
if (s == NULL)
return -1;
for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
unsigned int flags = get_type_flag(tok);
if (flags == -1U) {
pr_err("Unknown lock flags: %s\n", tok);
ret = -1;
break;
}
if (!add_lock_type(flags)) {
ret = -1;
break;
}
}
free(s);
return ret;
}
static bool add_lock_addr(unsigned long addr)
{
unsigned long *tmp;
tmp = realloc(filters.addrs, (filters.nr_addrs + 1) * sizeof(*filters.addrs));
if (tmp == NULL) {
pr_err("Memory allocation failure\n");
return false;
}
tmp[filters.nr_addrs++] = addr;
filters.addrs = tmp;
return true;
}
static bool add_lock_sym(char *name)
{
char **tmp;
char *sym = strdup(name);
if (sym == NULL) {
pr_err("Memory allocation failure\n");
return false;
}
tmp = realloc(filters.syms, (filters.nr_syms + 1) * sizeof(*filters.syms));
if (tmp == NULL) {
pr_err("Memory allocation failure\n");
free(sym);
return false;
}
tmp[filters.nr_syms++] = sym;
filters.syms = tmp;
return true;
}
static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str,
int unset __maybe_unused)
{
char *s, *tmp, *tok;
int ret = 0;
u64 addr;
s = strdup(str);
if (s == NULL)
return -1;
for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
char *end;
addr = strtoul(tok, &end, 16);
if (*end == '\0') {
if (!add_lock_addr(addr)) {
ret = -1;
break;
}
continue;
}
/*
* At this moment, we don't have kernel symbols. Save the symbols
* in a separate list and resolve them to addresses later.
*/
if (!add_lock_sym(tok)) {
ret = -1;
break;
}
}
free(s);
return ret;
}
static int parse_call_stack(const struct option *opt __maybe_unused, const char *str,
int unset __maybe_unused)
{
char *s, *tmp, *tok;
int ret = 0;
s = strdup(str);
if (s == NULL)
return -1;
for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
struct callstack_filter *entry;
entry = malloc(sizeof(*entry) + strlen(tok) + 1);
if (entry == NULL) {
pr_err("Memory allocation failure\n");
return -1;
}
strcpy(entry->name, tok);
list_add_tail(&entry->list, &callstack_filters);
}
free(s);
return ret;
}
static int parse_output(const struct option *opt __maybe_unused, const char *str,
int unset __maybe_unused)
{
const char **name = (const char **)opt->value;
if (str == NULL)
return -1;
lock_output = fopen(str, "w");
if (lock_output == NULL) {
pr_err("Cannot open %s\n", str);
return -1;
}
*name = str;
return 0;
}
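/*
 * Entry point for 'perf lock'. A hypothetical session (sketch only;
 * flag names are taken from the option tables below):
 *
 *   $ perf lock record -- sleep 1      # record lock events
 *   $ perf lock report -k wait_total   # sort report by total wait time
 *   $ perf lock contention -b -E 5     # top 5 contended locks via BPF
 */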
int cmd_lock(int argc, const char **argv)
{
const struct option lock_options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_CALLBACK(0, "output", &output_name, "file", "output file name", parse_output),
OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
"file", "kallsyms pathname"),
OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
OPT_END()
};
const struct option info_options[] = {
OPT_BOOLEAN('t', "threads", &info_threads,
"dump thread list in perf.data"),
OPT_BOOLEAN('m', "map", &info_map,
"map of lock instances (address:name table)"),
OPT_PARENT(lock_options)
};
const struct option report_options[] = {
OPT_STRING('k', "key", &sort_key, "acquired",
"key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
OPT_STRING('F', "field", &output_fields, NULL,
"output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
/* TODO: type */
OPT_BOOLEAN('c', "combine-locks", &combine_locks,
"combine locks in the same class"),
OPT_BOOLEAN('t', "threads", &show_thread_stats,
"show per-thread lock stats"),
OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
OPT_PARENT(lock_options)
};
struct option contention_options[] = {
OPT_STRING('k', "key", &sort_key, "wait_total",
"key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"),
OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait",
"output fields (contended / wait_total / wait_max / wait_min / avg_wait)"),
OPT_BOOLEAN('t', "threads", &show_thread_stats,
"show per-thread lock stats"),
OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"),
OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
"System-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
"List of cpus to monitor"),
OPT_STRING('p', "pid", &target.pid, "pid",
"Trace on existing process id"),
OPT_STRING(0, "tid", &target.tid, "tid",
"Trace on existing thread id (exclusive to --pid)"),
OPT_CALLBACK('M', "map-nr-entries", &bpf_map_entries, "num",
"Max number of BPF map entries", parse_map_entry),
OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
"Set the maximum stack depth when collecting lock contention, "
"Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
OPT_INTEGER(0, "stack-skip", &stack_skip,
"Set the number of stack depth to skip when finding a lock caller, "
"Default: " __stringify(CONTENTION_STACK_SKIP)),
OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
OPT_BOOLEAN('l', "lock-addr", &show_lock_addrs, "show lock stats by address"),
OPT_CALLBACK('Y', "type-filter", NULL, "FLAGS",
"Filter specific type of locks", parse_lock_type),
OPT_CALLBACK('L', "lock-filter", NULL, "ADDRS/NAMES",
"Filter specific address/symbol of locks", parse_lock_addr),
OPT_CALLBACK('S', "callstack-filter", NULL, "NAMES",
"Filter specific function in the callstack", parse_call_stack),
OPT_BOOLEAN('o', "lock-owner", &show_lock_owner, "show lock owners instead of waiters"),
OPT_STRING_NOEMPTY('x', "field-separator", &symbol_conf.field_sep, "separator",
"print result in CSV format with custom separator"),
OPT_PARENT(lock_options)
};
const char * const info_usage[] = {
"perf lock info [<options>]",
NULL
};
const char *const lock_subcommands[] = { "record", "report", "script",
"info", "contention", NULL };
const char *lock_usage[] = {
NULL,
NULL
};
const char * const report_usage[] = {
"perf lock report [<options>]",
NULL
};
const char * const contention_usage[] = {
"perf lock contention [<options>]",
NULL
};
unsigned int i;
int rc = 0;
lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table));
if (!lockhash_table)
return -ENOMEM;
for (i = 0; i < LOCKHASH_SIZE; i++)
INIT_HLIST_HEAD(lockhash_table + i);
lock_output = stderr;
argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(lock_usage, lock_options);
if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
return __cmd_record(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
trace_handler = &report_lock_ops;
if (argc) {
argc = parse_options(argc, argv,
report_options, report_usage, 0);
if (argc)
usage_with_options(report_usage, report_options);
}
rc = __cmd_report(false);
} else if (!strcmp(argv[0], "script")) {
/* Aliased to 'perf script' */
rc = cmd_script(argc, argv);
} else if (!strcmp(argv[0], "info")) {
if (argc) {
argc = parse_options(argc, argv,
info_options, info_usage, 0);
if (argc)
usage_with_options(info_usage, info_options);
}
/* recycling report_lock_ops */
trace_handler = &report_lock_ops;
rc = __cmd_report(true);
} else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) {
trace_handler = &contention_lock_ops;
sort_key = "wait_total";
output_fields = "contended,wait_total,wait_max,avg_wait";
#ifndef HAVE_BPF_SKEL
set_option_nobuild(contention_options, 'b', "use-bpf",
"no BUILD_BPF_SKEL=1", false);
#endif
if (argc) {
argc = parse_options(argc, argv, contention_options,
contention_usage, 0);
}
if (check_lock_contention_options(contention_options,
contention_usage) < 0)
return -1;
rc = __cmd_contention(argc, argv);
} else {
usage_with_options(lock_usage, lock_options);
}
zfree(&lockhash_table);
return rc;
}
| linux-master | tools/perf/builtin-lock.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-top.c
*
* Builtin top command: Display a continuously updated profile of
* any workload, CPU or specific PID.
*
* Copyright (C) 2008, Red Hat Inc, Ingo Molnar <[email protected]>
* 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*
* Improvements and fixes by:
*
* Arjan van de Ven <[email protected]>
* Yanmin Zhang <[email protected]>
* Wu Fengguang <[email protected]>
* Mike Galbraith <[email protected]>
* Paul Mackerras <[email protected]>
*/
#include "builtin.h"
#include "perf.h"
#include "util/annotate.h"
#include "util/bpf-event.h"
#include "util/cgroup.h"
#include "util/config.h"
#include "util/color.h"
#include "util/dso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/mmap.h"
#include "util/session.h"
#include "util/thread.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/callchain.h"
#include "util/cpumap.h"
#include "util/sort.h"
#include "util/string2.h"
#include "util/term.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
#include "arch/common.h"
#include "ui/ui.h"
#include "util/debug.h"
#include "util/ordered-events.h"
#include "util/pfm.h"
#include <assert.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/mman.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <perf/mmap.h>
static volatile sig_atomic_t done;
static volatile sig_atomic_t resize;
#define HEADER_LINE_NR 5
static void perf_top__update_print_entries(struct perf_top *top)
{
top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}
static void winch_sig(int sig __maybe_unused)
{
resize = 1;
}
static void perf_top__resize(struct perf_top *top)
{
get_term_dimensions(&top->winsize);
perf_top__update_print_entries(top);
}
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
struct evsel *evsel;
struct symbol *sym;
struct annotation *notes;
struct map *map;
struct dso *dso;
int err = -1;
if (!he || !he->ms.sym)
return -1;
evsel = hists_to_evsel(he->hists);
sym = he->ms.sym;
map = he->ms.map;
dso = map__dso(map);
/*
* We can't annotate with just /proc/kallsyms
*/
if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
pr_err("Can't annotate %s: No vmlinux file was found in the "
"path\n", sym->name);
sleep(1);
return -1;
}
notes = symbol__annotation(sym);
annotation__lock(notes);
if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
annotation__unlock(notes);
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
sleep(1);
return err;
}
err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL);
if (err == 0) {
top->sym_filter_entry = he;
} else {
char msg[BUFSIZ];
symbol__strerror_disassemble(&he->ms, err, msg, sizeof(msg));
pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
}
annotation__unlock(notes);
return err;
}
static void __zero_source_counters(struct hist_entry *he)
{
struct symbol *sym = he->ms.sym;
symbol__annotate_zero_histograms(sym);
}
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
struct utsname uts;
int err = uname(&uts);
struct dso *dso = map__dso(map);
ui__warning("Out of bounds address found:\n\n"
"Addr: %" PRIx64 "\n"
"DSO: %s %c\n"
"Map: %" PRIx64 "-%" PRIx64 "\n"
"Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
"Arch: %s\n"
"Kernel: %s\n"
"Tools: %s\n\n"
"Not all samples will be on the annotation output.\n\n"
"Please report to [email protected]\n",
ip, dso->long_name, dso__symtab_origin(dso),
map__start(map), map__end(map), sym->start, sym->end,
sym->binding == STB_GLOBAL ? 'g' :
sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
err ? "[unknown]" : uts.machine,
err ? "[unknown]" : uts.release, perf_version_string);
if (use_browser <= 0)
sleep(5);
map__set_erange_warned(map, true);
}
static void perf_top__record_precise_ip(struct perf_top *top,
struct hist_entry *he,
struct perf_sample *sample,
struct evsel *evsel, u64 ip)
EXCLUSIVE_LOCKS_REQUIRED(he->hists->lock)
{
struct annotation *notes;
struct symbol *sym = he->ms.sym;
int err = 0;
if (sym == NULL || (use_browser == 0 &&
(top->sym_filter_entry == NULL ||
top->sym_filter_entry->ms.sym != sym)))
return;
notes = symbol__annotation(sym);
if (!annotation__trylock(notes))
return;
err = hist_entry__inc_addr_samples(he, sample, evsel, ip);
annotation__unlock(notes);
if (unlikely(err)) {
/*
* This function is now called with he->hists->lock held.
* Release it before going to sleep.
*/
mutex_unlock(&he->hists->lock);
if (err == -ERANGE && !map__erange_warned(he->ms.map))
ui__warn_map_erange(he->ms.map, sym, ip);
else if (err == -ENOMEM) {
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
sleep(1);
}
mutex_lock(&he->hists->lock);
}
}
static void perf_top__show_details(struct perf_top *top)
{
struct hist_entry *he = top->sym_filter_entry;
struct evsel *evsel;
struct annotation *notes;
struct symbol *symbol;
int more;
if (!he)
return;
evsel = hists_to_evsel(he->hists);
symbol = he->ms.sym;
notes = symbol__annotation(symbol);
annotation__lock(notes);
symbol__calc_percent(symbol, evsel);
if (notes->src == NULL)
goto out_unlock;
printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);
if (top->evlist->enabled) {
if (top->zero)
symbol__annotate_zero_histogram(symbol, top->sym_evsel->core.idx);
else
symbol__annotate_decay_histogram(symbol, top->sym_evsel->core.idx);
}
if (more != 0)
printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
annotation__unlock(notes);
}
static void perf_top__resort_hists(struct perf_top *t)
{
struct evlist *evlist = t->evlist;
struct evsel *pos;
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
/*
* unlink existing entries so that they can be linked
* in a correct order in hists__match() below.
*/
hists__unlink(hists);
if (evlist->enabled) {
if (t->zero) {
hists__delete_entries(hists);
} else {
hists__decay_entries(hists, t->hide_user_symbols,
t->hide_kernel_symbols);
}
}
hists__collapse_resort(hists, NULL);
/* Non-group events are considered to be leaders */
if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
struct hists *leader_hists = evsel__hists(evsel__leader(pos));
hists__match(leader_hists, hists);
hists__link(leader_hists, hists);
}
}
evlist__for_each_entry(evlist, pos) {
evsel__output_resort(pos, NULL);
}
}
static void perf_top__print_sym_table(struct perf_top *top)
{
char bf[160];
int printed = 0;
const int win_width = top->winsize.ws_col - 1;
struct evsel *evsel = top->sym_evsel;
struct hists *hists = evsel__hists(evsel);
puts(CONSOLE_CLEAR);
perf_top__header_snprintf(top, bf, sizeof(bf));
printf("%s\n", bf);
printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
if (!top->record_opts.overwrite &&
(top->evlist->stats.nr_lost_warned !=
top->evlist->stats.nr_events[PERF_RECORD_LOST])) {
top->evlist->stats.nr_lost_warned =
top->evlist->stats.nr_events[PERF_RECORD_LOST];
color_fprintf(stdout, PERF_COLOR_RED,
"WARNING: LOST %d chunks, Check IO/CPU overload",
top->evlist->stats.nr_lost_warned);
++printed;
}
if (top->sym_filter_entry) {
perf_top__show_details(top);
return;
}
perf_top__resort_hists(top);
hists__output_recalc_col_len(hists, top->print_entries - printed);
putchar('\n');
hists__fprintf(hists, false, top->print_entries - printed, win_width,
top->min_percent, stdout, !symbol_conf.use_callchain);
}
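/*
 * Note: the malloc(0) below is intentional; getline() reallocates the
 * buffer to the required size, and free() at out_free is valid for
 * both the resized and the untouched buffer.
 */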
static void prompt_integer(int *target, const char *msg)
{
char *buf = malloc(0), *p;
size_t dummy = 0;
int tmp;
fprintf(stdout, "\n%s: ", msg);
if (getline(&buf, &dummy, stdin) < 0)
return;
p = strchr(buf, '\n');
if (p)
*p = 0;
p = buf;
while (*p) {
if (!isdigit(*p))
goto out_free;
p++;
}
tmp = strtoul(buf, NULL, 10);
*target = tmp;
out_free:
free(buf);
}
static void prompt_percent(int *target, const char *msg)
{
int tmp = 0;
prompt_integer(&tmp, msg);
if (tmp >= 0 && tmp <= 100)
*target = tmp;
}
static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
char *buf = NULL, *p;
struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
struct hists *hists = evsel__hists(top->sym_evsel);
struct rb_node *next;
size_t dummy = 0;
/* zero counters of active symbol */
if (syme) {
__zero_source_counters(syme);
top->sym_filter_entry = NULL;
}
fprintf(stdout, "\n%s: ", msg);
if (getline(&buf, &dummy, stdin) < 0)
goto out_free;
p = strchr(buf, '\n');
if (p)
*p = 0;
next = rb_first_cached(&hists->entries);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
found = n;
break;
}
next = rb_next(&n->rb_node);
}
if (!found) {
fprintf(stderr, "Sorry, %s is not active.\n", buf);
sleep(1);
} else
perf_top__parse_source(top, found);
out_free:
free(buf);
}
static void perf_top__print_mapped_keys(struct perf_top *top)
{
char *name = NULL;
if (top->sym_filter_entry) {
struct symbol *sym = top->sym_filter_entry->ms.sym;
name = sym->name;
}
fprintf(stdout, "\nMapped keys:\n");
fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
if (top->evlist->core.nr_entries > 1)
fprintf(stdout, "\t[E] active event counter. \t(%s)\n", evsel__name(top->sym_evsel));
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
fprintf(stdout, "\t[S] stop annotation.\n");
fprintf(stdout,
"\t[K] hide kernel symbols. \t(%s)\n",
top->hide_kernel_symbols ? "yes" : "no");
fprintf(stdout,
"\t[U] hide user symbols. \t(%s)\n",
top->hide_user_symbols ? "yes" : "no");
fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
fprintf(stdout, "\t[qQ] quit.\n");
}
static int perf_top__key_mapped(struct perf_top *top, int c)
{
switch (c) {
case 'd':
case 'e':
case 'f':
case 'z':
case 'q':
case 'Q':
case 'K':
case 'U':
case 'F':
case 's':
case 'S':
return 1;
case 'E':
return top->evlist->core.nr_entries > 1 ? 1 : 0;
default:
break;
}
return 0;
}
static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
bool ret = true;
if (!perf_top__key_mapped(top, c)) {
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
struct termios save;
perf_top__print_mapped_keys(top);
fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
fflush(stdout);
set_term_quiet_input(&save);
poll(&stdin_poll, 1, -1);
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
if (!perf_top__key_mapped(top, c))
return ret;
}
switch (c) {
case 'd':
prompt_integer(&top->delay_secs, "Enter display delay");
if (top->delay_secs < 1)
top->delay_secs = 1;
break;
case 'e':
prompt_integer(&top->print_entries, "Enter display entries (lines)");
if (top->print_entries == 0) {
perf_top__resize(top);
signal(SIGWINCH, winch_sig);
} else {
signal(SIGWINCH, SIG_DFL);
}
break;
case 'E':
if (top->evlist->core.nr_entries > 1) {
/* Select 0 as the default event: */
int counter = 0;
fprintf(stderr, "\nAvailable events:");
evlist__for_each_entry(top->evlist, top->sym_evsel)
fprintf(stderr, "\n\t%d %s", top->sym_evsel->core.idx, evsel__name(top->sym_evsel));
prompt_integer(&counter, "Enter details event counter");
if (counter >= top->evlist->core.nr_entries) {
top->sym_evsel = evlist__first(top->evlist);
fprintf(stderr, "Sorry, no such event, using %s.\n", evsel__name(top->sym_evsel));
sleep(1);
break;
}
evlist__for_each_entry(top->evlist, top->sym_evsel)
if (top->sym_evsel->core.idx == counter)
break;
} else
top->sym_evsel = evlist__first(top->evlist);
break;
case 'f':
prompt_integer(&top->count_filter, "Enter display event count filter");
break;
case 'F':
prompt_percent(&top->annotation_opts.min_pcnt,
"Enter details display event filter (percent)");
break;
case 'K':
top->hide_kernel_symbols = !top->hide_kernel_symbols;
break;
case 'q':
case 'Q':
printf("exiting.\n");
if (top->dump_symtab)
perf_session__fprintf_dsos(top->session, stderr);
ret = false;
break;
case 's':
perf_top__prompt_symbol(top, "Enter details symbol");
break;
case 'S':
if (!top->sym_filter_entry)
break;
else {
struct hist_entry *syme = top->sym_filter_entry;
top->sym_filter_entry = NULL;
__zero_source_counters(syme);
}
break;
case 'U':
top->hide_user_symbols = !top->hide_user_symbols;
break;
case 'z':
top->zero = !top->zero;
break;
default:
break;
}
return ret;
}
static void perf_top__sort_new_samples(void *arg)
{
struct perf_top *t = arg;
if (t->evlist->selected != NULL)
t->sym_evsel = t->evlist->selected;
perf_top__resort_hists(t);
if (t->lost || t->drop)
pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C)\n");
}
static void stop_top(void)
{
session_done = 1;
done = 1;
}
static void *display_thread_tui(void *arg)
{
struct evsel *pos;
struct perf_top *top = arg;
const char *help = "For a higher level overview, try: perf top --sort comm,dso";
struct hist_browser_timer hbt = {
.timer = perf_top__sort_new_samples,
.arg = top,
.refresh = top->delay_secs,
};
int ret;
/* In order to read symbols from other namespaces perf needs to call
* setns(2). This isn't permitted if the struct_fs has multiple users.
* unshare(2) the fs so that we may continue to setns into namespaces
* that we're observing.
*/
unshare(CLONE_FS);
prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
repeat:
perf_top__sort_new_samples(top);
/*
* Initialize the uid_filter_str; in the future the TUI will allow
* zooming in/out on UIDs. For now just use whatever the user passed
* via --uid.
*/
evlist__for_each_entry(top->evlist, pos) {
struct hists *hists = evsel__hists(pos);
hists->uid_filter_str = top->record_opts.target.uid_str;
}
ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
&top->session->header.env, !top->record_opts.overwrite,
&top->annotation_opts);
if (ret == K_RELOAD) {
top->zero = true;
goto repeat;
} else
stop_top();
return NULL;
}
static void display_sig(int sig __maybe_unused)
{
stop_top();
}
static void display_setup_sig(void)
{
signal(SIGSEGV, sighandler_dump_stack);
signal(SIGFPE, sighandler_dump_stack);
signal(SIGINT, display_sig);
signal(SIGQUIT, display_sig);
signal(SIGTERM, display_sig);
}
static void *display_thread(void *arg)
{
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
struct termios save;
struct perf_top *top = arg;
int delay_msecs, c;
/* In order to read symbols from other namespaces perf needs to call
* setns(2). This isn't permitted if the struct_fs has multiple users.
* unshare(2) the fs so that we may continue to setns into namespaces
* that we're observing.
*/
unshare(CLONE_FS);
prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
display_setup_sig();
pthread__unblock_sigwinch();
repeat:
delay_msecs = top->delay_secs * MSEC_PER_SEC;
set_term_quiet_input(&save);
/* discard any pending input (e.g. a stray return key) */
clearerr(stdin);
if (poll(&stdin_poll, 1, 0) > 0)
getc(stdin);
while (!done) {
perf_top__print_sym_table(top);
/*
* Either timeout expired or we got an EINTR due to SIGWINCH,
* refresh screen in both cases.
*/
switch (poll(&stdin_poll, 1, delay_msecs)) {
case 0:
continue;
case -1:
if (errno == EINTR)
continue;
fallthrough;
default:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
if (perf_top__handle_keypress(top, c))
goto repeat;
stop_top();
}
}
tcsetattr(0, TCSAFLUSH, &save);
return NULL;
}
static int hist_iter__top_callback(struct hist_entry_iter *iter,
struct addr_location *al, bool single,
void *arg)
EXCLUSIVE_LOCKS_REQUIRED(iter->he->hists->lock)
{
struct perf_top *top = arg;
struct evsel *evsel = iter->evsel;
if (perf_hpp_list.sym && single)
perf_top__record_precise_ip(top, iter->he, iter->sample, evsel, al->addr);
hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
!(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
NULL);
return 0;
}
static void perf_event__process_sample(struct perf_tool *tool,
const union perf_event *event,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_top *top = container_of(tool, struct perf_top, tool);
struct addr_location al;
if (!machine && perf_guest) {
static struct intlist *seen;
if (!seen)
seen = intlist__new(NULL);
if (!intlist__has_entry(seen, sample->pid)) {
pr_err("Can't find guest [%d]'s kernel information\n",
sample->pid);
intlist__add(seen, sample->pid);
}
return;
}
if (!machine) {
pr_err("%u unprocessable samples recorded.\r",
top->session->evlist->stats.nr_unprocessable_samples++);
return;
}
if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
top->exact_samples++;
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0)
goto out;
if (top->stitch_lbr)
thread__set_lbr_stitch_enable(al.thread, true);
if (!machine->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&
al.cpumode == PERF_RECORD_MISC_KERNEL) {
if (!evlist__exclude_kernel(top->session->evlist)) {
ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Kernel%s samples will not be resolved.\n",
al.map && map__has_symbols(al.map) ?
" modules" : "");
if (use_browser <= 0)
sleep(5);
}
machine->kptr_restrict_warned = true;
}
if (al.sym == NULL && al.map != NULL) {
const char *msg = "Kernel samples will not be resolved.\n";
/*
* As we do lazy loading of symtabs we only will know if the
* specified vmlinux file is invalid when we actually have a
* hit in kernel space and then try to load it. So if we get
* here and there are _no_ symbols in the DSO backing the
* kernel map, bail out.
*
* We may never get here, for instance, if we use -K/
* --hide-kernel-symbols, even if the user specifies an
* invalid --vmlinux ;-)
*/
if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
__map__is_kernel(al.map) && map__has_symbols(al.map)) {
if (symbol_conf.vmlinux_name) {
char serr[256];
dso__strerror_load(map__dso(al.map), serr, sizeof(serr));
ui__warning("The %s file can't be used: %s\n%s",
symbol_conf.vmlinux_name, serr, msg);
} else {
ui__warning("A vmlinux file was not found.\n%s",
msg);
}
if (use_browser <= 0)
sleep(5);
top->vmlinux_warned = true;
}
}
if (al.sym == NULL || !al.sym->idle) {
struct hists *hists = evsel__hists(evsel);
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = sample,
.add_entry_cb = hist_iter__top_callback,
};
if (symbol_conf.cumulate_callchain)
iter.ops = &hist_iter_cumulative;
else
iter.ops = &hist_iter_normal;
mutex_lock(&hists->lock);
if (hist_entry_iter__add(&iter, &al, top->max_stack, top) < 0)
pr_err("Problem incrementing symbol period, skipping event\n");
mutex_unlock(&hists->lock);
}
out:
addr_location__exit(&al);
}
static void
perf_top__process_lost(struct perf_top *top, union perf_event *event,
struct evsel *evsel)
{
top->lost += event->lost.lost;
top->lost_total += event->lost.lost;
evsel->evlist->stats.total_lost += event->lost.lost;
}
static void
perf_top__process_lost_samples(struct perf_top *top,
union perf_event *event,
struct evsel *evsel)
{
top->lost += event->lost_samples.lost;
top->lost_total += event->lost_samples.lost;
evsel->evlist->stats.total_lost_samples += event->lost_samples.lost;
}
static u64 last_timestamp;
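/*
 * Drain one mmap ring: parse each event's timestamp and queue it into
 * the current ordered_events buffer so the process thread can deliver
 * the events in time order.
 */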
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
struct record_opts *opts = &top->record_opts;
struct evlist *evlist = top->evlist;
struct mmap *md;
union perf_event *event;
md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
if (perf_mmap__read_init(&md->core) < 0)
return;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
int ret;
ret = evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
if (ret && ret != -1)
break;
ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0, NULL);
if (ret)
break;
perf_mmap__consume(&md->core);
if (top->qe.rotate) {
mutex_lock(&top->qe.mutex);
top->qe.rotate = false;
cond_signal(&top->qe.cond);
mutex_unlock(&top->qe.mutex);
}
}
perf_mmap__read_done(&md->core);
}
static void perf_top__mmap_read(struct perf_top *top)
{
bool overwrite = top->record_opts.overwrite;
struct evlist *evlist = top->evlist;
int i;
if (overwrite)
evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
for (i = 0; i < top->evlist->core.nr_mmaps; i++)
perf_top__mmap_read_idx(top, i);
if (overwrite) {
evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
}
}
/*
* Check the per-event overwrite term.
* perf top requires a consistent term across all events.
* - No event has a per-event term
*   E.g. "cpu/cpu-cycles/,cpu/instructions/"
*   Nothing changes; return 0.
* - All events have the same per-event term
*   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
*   Use the per-event setting to replace opts->overwrite if they
*   differ, then return 0.
* - Events have different per-event terms
*   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
*   Return -1.
* - Some events set a per-event term, but others do not.
*   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
*   Return -1.
*/
static int perf_top__overwrite_check(struct perf_top *top)
{
struct record_opts *opts = &top->record_opts;
struct evlist *evlist = top->evlist;
struct evsel_config_term *term;
struct list_head *config_terms;
struct evsel *evsel;
int set, overwrite = -1;
evlist__for_each_entry(evlist, evsel) {
set = -1;
config_terms = &evsel->config_terms;
list_for_each_entry(term, config_terms, list) {
if (term->type == EVSEL__CONFIG_TERM_OVERWRITE)
set = term->val.overwrite ? 1 : 0;
}
/* no term for current and previous event (likely) */
if ((overwrite < 0) && (set < 0))
continue;
/* has term for both current and previous event, compare */
if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
return -1;
/* no term for current event but has term for previous one */
if ((overwrite >= 0) && (set < 0))
return -1;
/* has term for current event */
if ((overwrite < 0) && (set >= 0)) {
/* if it's first event, set overwrite */
if (evsel == evlist__first(evlist))
overwrite = set;
else
return -1;
}
}
if ((overwrite >= 0) && (opts->overwrite != overwrite))
opts->overwrite = overwrite;
return 0;
}
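/*
 * If the kernel lacks write_backward support, flip every event to the
 * forward (non-overwrite) ring buffer so the caller can retry opening
 * the counters.
 */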
static int perf_top_overwrite_fallback(struct perf_top *top,
struct evsel *evsel)
{
struct record_opts *opts = &top->record_opts;
struct evlist *evlist = top->evlist;
struct evsel *counter;
if (!opts->overwrite)
return 0;
/* only fall back when first event fails */
if (evsel != evlist__first(evlist))
return 0;
evlist__for_each_entry(evlist, counter)
counter->core.attr.write_backward = false;
opts->overwrite = false;
pr_debug2("fall back to non-overwrite mode\n");
return 1;
}
static int perf_top__start_counters(struct perf_top *top)
{
char msg[BUFSIZ];
struct evsel *counter;
struct evlist *evlist = top->evlist;
struct record_opts *opts = &top->record_opts;
if (perf_top__overwrite_check(top)) {
ui__error("perf top only support consistent per-event "
"overwrite setting for all events\n");
goto out_err;
}
evlist__config(evlist, opts, &callchain_param);
evlist__for_each_entry(evlist, counter) {
try_again:
if (evsel__open(counter, top->evlist->core.user_requested_cpus,
top->evlist->core.threads) < 0) {
/*
* Special handling for the overwrite fallback:
* perf top is the only tool that enables
* overwrite mode by default, supports both
* overwrite and non-overwrite mode, and
* requires a consistent mode for all events.
*
* This may move to generic code once more
* tools share this behaviour.
*/
if (perf_missing_features.write_backward &&
perf_top_overwrite_fallback(top, counter))
goto try_again;
if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
}
evsel__open_strerror(counter, &opts->target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
goto out_err;
}
}
if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
ui__error("Failed to mmap with %d (%s)\n",
errno, str_error_r(errno, msg, sizeof(msg)));
goto out_err;
}
return 0;
out_err:
return -1;
}
static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
if (callchain->mode != CHAIN_NONE) {
if (callchain_register_param(callchain) < 0) {
ui__error("Can't register callchain params.\n");
return -EINVAL;
}
}
return 0;
}
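/*
 * The reader thread queues events into top->qe.in while process_thread
 * flushes the other queue; rotate_queues() swaps the two so each side
 * always works on its own ordered_events buffer.
 */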
static struct ordered_events *rotate_queues(struct perf_top *top)
{
struct ordered_events *in = top->qe.in;
if (top->qe.in == &top->qe.data[1])
top->qe.in = &top->qe.data[0];
else
top->qe.in = &top->qe.data[1];
return in;
}
static void *process_thread(void *arg)
{
struct perf_top *top = arg;
while (!done) {
struct ordered_events *out, *in = top->qe.in;
if (!in->nr_events) {
usleep(100);
continue;
}
out = rotate_queues(top);
mutex_lock(&top->qe.mutex);
top->qe.rotate = true;
cond_wait(&top->qe.cond, &top->qe.mutex);
mutex_unlock(&top->qe.mutex);
if (ordered_events__flush(out, OE_FLUSH__TOP))
pr_err("failed to process events\n");
}
return NULL;
}
/*
* Allow only 'top->delay_secs' seconds behind samples.
*/
static int should_drop(struct ordered_event *qevent, struct perf_top *top)
{
union perf_event *event = qevent->event;
u64 delay_timestamp;
if (event->header.type != PERF_RECORD_SAMPLE)
return false;
delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
return delay_timestamp < last_timestamp;
}
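/*
 * Ordered-events delivery callback: parse the sample, resolve the
 * machine from the cpumode and dispatch to the per-record handlers.
 */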
static int deliver_event(struct ordered_events *qe,
struct ordered_event *qevent)
{
struct perf_top *top = qe->data;
struct evlist *evlist = top->evlist;
struct perf_session *session = top->session;
union perf_event *event = qevent->event;
struct perf_sample sample;
struct evsel *evsel;
struct machine *machine;
int ret = -1;
if (should_drop(qevent, top)) {
top->drop++;
top->drop_total++;
return 0;
}
ret = evlist__parse_sample(evlist, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
goto next_event;
}
evsel = evlist__id2evsel(session->evlist, sample.id);
assert(evsel != NULL);
if (event->header.type == PERF_RECORD_SAMPLE) {
if (evswitch__discard(&top->evswitch, evsel))
return 0;
++top->samples;
}
switch (sample.cpumode) {
case PERF_RECORD_MISC_USER:
++top->us_samples;
if (top->hide_user_symbols)
goto next_event;
machine = &session->machines.host;
break;
case PERF_RECORD_MISC_KERNEL:
++top->kernel_samples;
if (top->hide_kernel_symbols)
goto next_event;
machine = &session->machines.host;
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
++top->guest_kernel_samples;
machine = perf_session__find_machine(session,
sample.pid);
break;
case PERF_RECORD_MISC_GUEST_USER:
++top->guest_us_samples;
/*
* TODO: we don't process guest user samples from the
* host side, beyond simple counting.
*/
goto next_event;
default:
if (event->header.type == PERF_RECORD_SAMPLE)
goto next_event;
machine = &session->machines.host;
break;
}
if (event->header.type == PERF_RECORD_SAMPLE) {
perf_event__process_sample(&top->tool, event, evsel,
&sample, machine);
} else if (event->header.type == PERF_RECORD_LOST) {
perf_top__process_lost(top, event, evsel);
} else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
perf_top__process_lost_samples(top, event, evsel);
} else if (event->header.type < PERF_RECORD_MAX) {
events_stats__inc(&session->evlist->stats, event->header.type);
machine__process_event(machine, event, &sample);
} else
++session->evlist->stats.nr_unknown_events;
ret = 0;
next_event:
return ret;
}
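/*
 * Both queues copy events on queue, since the mmap ring-buffer slot
 * backing an event can be reused once the event has been consumed.
 */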
static void init_process_thread(struct perf_top *top)
{
ordered_events__init(&top->qe.data[0], deliver_event, top);
ordered_events__init(&top->qe.data[1], deliver_event, top);
ordered_events__set_copy_on_queue(&top->qe.data[0], true);
ordered_events__set_copy_on_queue(&top->qe.data[1], true);
top->qe.in = &top->qe.data[0];
mutex_init(&top->qe.mutex);
cond_init(&top->qe.cond);
}
static void exit_process_thread(struct perf_top *top)
{
ordered_events__free(&top->qe.data[0]);
ordered_events__free(&top->qe.data[1]);
mutex_destroy(&top->qe.mutex);
cond_destroy(&top->qe.cond);
}
static int __cmd_top(struct perf_top *top)
{
struct record_opts *opts = &top->record_opts;
pthread_t thread, thread_process;
int ret;
if (!top->annotation_opts.objdump_path) {
ret = perf_env__lookup_objdump(&top->session->header.env,
&top->annotation_opts.objdump_path);
if (ret)
return ret;
}
ret = callchain_param__setup_sample_type(&callchain_param);
if (ret)
return ret;
if (perf_session__register_idle_thread(top->session) < 0)
return ret;
if (top->nr_threads_synthesize > 1)
perf_set_multithreaded();
init_process_thread(top);
if (opts->record_namespaces)
top->tool.namespace_events = true;
if (opts->record_cgroup) {
#ifdef HAVE_FILE_HANDLE
top->tool.cgroup_events = true;
#else
pr_err("cgroup tracking is not supported.\n");
return -1;
#endif
}
ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
&top->session->machines.host,
&top->record_opts);
if (ret < 0)
pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");
ret = perf_event__synthesize_cgroups(&top->tool, perf_event__process,
&top->session->machines.host);
if (ret < 0)
pr_debug("Couldn't synthesize cgroup events.\n");
machine__synthesize_threads(&top->session->machines.host, &opts->target,
top->evlist->core.threads, true, false,
top->nr_threads_synthesize);
perf_set_multithreaded();
if (perf_hpp_list.socket) {
ret = perf_env__read_cpu_topology_map(&perf_env);
if (ret < 0) {
char errbuf[BUFSIZ];
const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
ui__error("Could not read the CPU topology map: %s\n", err);
return ret;
}
}
ret = perf_top__start_counters(top);
if (ret)
return ret;
top->session->evlist = top->evlist;
perf_session__set_id_hdr_size(top->session);
/*
* When perf is starting the traced process, all the events (apart from
* group members) have enable_on_exec=1 set, so don't spoil it by
* prematurely enabling them.
*
* XXX 'top' still doesn't start workloads like record, trace, but should,
* so leave the check here.
*/
if (!target__none(&opts->target))
evlist__enable(top->evlist);
ret = -1;
if (pthread_create(&thread_process, NULL, process_thread, top)) {
ui__error("Could not create process thread.\n");
return ret;
}
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
display_thread), top)) {
ui__error("Could not create display thread.\n");
goto out_join_thread;
}
if (top->realtime_prio) {
struct sched_param param;
param.sched_priority = top->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, ¶m)) {
ui__error("Could not set realtime priority.\n");
goto out_join;
}
}
/* Wait for a minimal set of events before starting the snapshot */
evlist__poll(top->evlist, 100);
perf_top__mmap_read(top);
while (!done) {
u64 hits = top->samples;
perf_top__mmap_read(top);
if (opts->overwrite || (hits == top->samples))
ret = evlist__poll(top->evlist, 100);
if (resize) {
perf_top__resize(top);
resize = 0;
}
}
ret = 0;
out_join:
pthread_join(thread, NULL);
out_join_thread:
cond_signal(&top->qe.cond);
pthread_join(thread_process, NULL);
perf_set_singlethreaded();
exit_process_thread(top);
return ret;
}
static int
callchain_opt(const struct option *opt, const char *arg, int unset)
{
symbol_conf.use_callchain = true;
return record_callchain_opt(opt, arg, unset);
}
static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
struct callchain_param *callchain = opt->value;
callchain->enabled = !unset;
callchain->record_mode = CALLCHAIN_FP;
/*
* --no-call-graph
*/
if (unset) {
symbol_conf.use_callchain = false;
callchain->record_mode = CALLCHAIN_NONE;
return 0;
}
return parse_callchain_top_opt(arg);
}
static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
{
if (!strcmp(var, "top.call-graph")) {
var = "call-graph.record-mode";
return perf_default_config(var, value, cb);
}
if (!strcmp(var, "top.children")) {
symbol_conf.cumulate_callchain = perf_config_bool(var, value);
return 0;
}
return 0;
}
static int
parse_percent_limit(const struct option *opt, const char *arg,
int unset __maybe_unused)
{
struct perf_top *top = opt->value;
top->min_percent = strtof(arg, NULL);
return 0;
}
const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
int cmd_top(int argc, const char **argv)
{
char errbuf[BUFSIZ];
struct perf_top top = {
.count_filter = 5,
.delay_secs = 2,
.record_opts = {
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.freq = 4000, /* 4 KHz */
.target = {
.uses_mmap = true,
},
/*
* FIXME: This will lose PERF_RECORD_MMAP and other metadata
* when we pause, fix that and reenable. Probably using a
* separate evlist with a dummy event, i.e. a non-overwrite
* ring buffer just for metadata events, while PERF_RECORD_SAMPLE
* stays in overwrite mode. -acme
*/
.overwrite = 0,
.sample_time = true,
.sample_time_set = true,
},
.max_stack = sysctl__max_stack(),
.nr_threads_synthesize = UINT_MAX,
};
struct parse_events_option_args parse_events_option_args = {
.evlistp = &top.evlist,
};
bool branch_call_mode = false;
struct record_opts *opts = &top.record_opts;
struct target *target = &opts->target;
const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
const struct option options[] = {
OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
OPT_STRING('p', "pid", &target->pid, "pid",
"profile events on existing process id"),
OPT_STRING('t', "tid", &target->tid, "tid",
"profile events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
"list of cpus to monitor"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
"don't load vmlinux even if found"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
"file", "kallsyms pathname"),
OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
"hide kernel symbols"),
OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
"number of mmap data pages", evlist__parse_mmap_pages),
OPT_INTEGER('r', "realtime", &top.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
OPT_INTEGER('d', "delay", &top.delay_secs,
"number of seconds to delay between refreshes"),
OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
"dump the symbol table used for profiling"),
OPT_INTEGER('f', "count-filter", &top.count_filter,
"only display functions with more events than this"),
OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
"child tasks do not inherit counters"),
OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
"symbol to annotate"),
OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
"profile at this frequency",
record__parse_freq),
OPT_INTEGER('E', "entries", &top.print_entries,
"display this many functions"),
OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
"hide user symbols"),
#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
#endif
OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
" Please refer the man page for the complete list."),
OPT_STRING(0, "fields", &field_order, "key[,keys...]",
"output field(s): overhead, period, sample plus all of sort keys"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
NULL, "enables call-graph recording and display",
&callchain_opt),
OPT_CALLBACK(0, "call-graph", &callchain_param,
"record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
top_callchain_help, &parse_callchain_opt),
OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
"Accumulate callchains of children and show total overhead as well"),
OPT_INTEGER(0, "max-stack", &top.max_stack,
"Set the maximum stack depth when parsing the callchain. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
"ignore callees of these functions in call graphs",
report_parse_ignore_callees_opt),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only consider symbols in these comms"),
OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
"Interleave source code with assembly code (default)"),
OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
OPT_STRING(0, "objdump", &objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_STRING(0, "addr2line", &addr2line_path, "path",
"addr2line binary to use for line numbers"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix",
"Add prefix to source file path names in programs (with --prefix-strip)"),
OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N",
"Strip first N entries of source file path name in programs (with --prefix)"),
OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
OPT_CALLBACK(0, "percent-limit", &top, "percent",
"Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"How to display percentage of filtered entries", parse_filter_percentage),
OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
"width[,width...]",
"don't try to adjust column width, use these fixed values"),
OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
"branch any", "sample any taken branches",
parse_branch_stack),
OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
"branch filter mask", "branch stack filter modes",
parse_branch_stack),
OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
"add last branch records to call history"),
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
"Show raw trace event output (do not use print fmt or plugins)"),
OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
"Show entries in a hierarchy"),
OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
"Use a backward ring buffer, default: no"),
OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
"number of thread to run event synthesize"),
OPT_CALLBACK('G', "cgroup", &top.evlist, "name",
"monitor event in cgroup name only", parse_cgroups),
OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
"Record namespaces events"),
OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
"Record cgroup events"),
OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
"Sort the output by the event at the index n in group. "
"If n is invalid, sort by the first event. "
"WARNING: should be used on grouped events."),
OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
"Enable LBR callgraph stitching approach"),
#ifdef HAVE_LIBPFM
OPT_CALLBACK(0, "pfm-events", &top.evlist, "event",
"libpfm4 event selector. use 'perf list' to list available events",
parse_libpfm_events_option),
#endif
OPTS_EVSWITCH(&top.evswitch),
OPT_END()
};
const char * const top_usage[] = {
"perf top [<options>]",
NULL
};
int status = hists__init();
if (status < 0)
return status;
annotation_options__init(&top.annotation_opts);
top.annotation_opts.min_pcnt = 5;
top.annotation_opts.context = 4;
top.evlist = evlist__new();
if (top.evlist == NULL)
return -ENOMEM;
status = perf_config(perf_top_config, &top);
if (status)
return status;
/*
* Since the per arch annotation init routine may need the cpuid, read
* it here, since we are not getting this from the perf.data header.
*/
status = perf_env__read_cpuid(&perf_env);
if (status) {
/*
* Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
* warn the user explicitly.
*/
eprintf(status == ENOSYS ? 1 : 0, verbose,
"Couldn't read the cpuid for this machine: %s\n",
str_error_r(errno, errbuf, sizeof(errbuf)));
}
top.evlist->env = &perf_env;
argc = parse_options(argc, argv, options, top_usage, 0);
if (argc)
usage_with_options(top_usage, options);
if (disassembler_style) {
top.annotation_opts.disassembler_style = strdup(disassembler_style);
if (!top.annotation_opts.disassembler_style)
return -ENOMEM;
}
if (objdump_path) {
top.annotation_opts.objdump_path = strdup(objdump_path);
if (!top.annotation_opts.objdump_path)
return -ENOMEM;
}
if (addr2line_path) {
symbol_conf.addr2line_path = strdup(addr2line_path);
if (!symbol_conf.addr2line_path)
return -ENOMEM;
}
status = symbol__validate_sym_arguments();
if (status)
goto out_delete_evlist;
if (annotate_check_args(&top.annotation_opts) < 0)
goto out_delete_evlist;
if (!top.evlist->core.nr_entries) {
bool can_profile_kernel = perf_event_paranoid_check(1);
int err = parse_event(top.evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
if (err)
goto out_delete_evlist;
}
status = evswitch__init(&top.evswitch, top.evlist, stderr);
if (status)
goto out_delete_evlist;
if (symbol_conf.report_hierarchy) {
/* disable incompatible options */
symbol_conf.event_group = false;
symbol_conf.cumulate_callchain = false;
if (field_order) {
pr_err("Error: --hierarchy and --fields options cannot be used together\n");
parse_options_usage(top_usage, options, "fields", 0);
parse_options_usage(NULL, options, "hierarchy", 0);
goto out_delete_evlist;
}
}
if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) {
pr_err("Error: --stitch-lbr must be used with --call-graph lbr\n");
goto out_delete_evlist;
}
if (nr_cgroups > 0 && opts->record_cgroup) {
pr_err("--cgroup and --all-cgroups cannot be used together\n");
goto out_delete_evlist;
}
if (branch_call_mode) {
if (!opts->branch_stack)
opts->branch_stack = PERF_SAMPLE_BRANCH_ANY;
symbol_conf.use_callchain = true;
callchain_param.key = CCKEY_ADDRESS;
callchain_param.branch_callstack = true;
callchain_param.enabled = true;
if (callchain_param.record_mode == CALLCHAIN_NONE)
callchain_param.record_mode = CALLCHAIN_FP;
callchain_register_param(&callchain_param);
if (!sort_order)
sort_order = "srcline,symbol,dso";
}
if (opts->branch_stack && callchain_param.enabled)
symbol_conf.show_branchflag_count = true;
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
perf_hpp_list.need_collapse = 1;
if (top.use_stdio)
use_browser = 0;
#ifdef HAVE_SLANG_SUPPORT
else if (top.use_tui)
use_browser = 1;
#endif
setup_browser(false);
if (setup_sorting(top.evlist) < 0) {
if (sort_order)
parse_options_usage(top_usage, options, "s", 1);
if (field_order)
parse_options_usage(sort_order ? NULL : top_usage,
options, "fields", 0);
goto out_delete_evlist;
}
status = target__validate(target);
if (status) {
target__strerror(target, status, errbuf, BUFSIZ);
ui__warning("%s\n", errbuf);
}
status = target__parse_uid(target);
if (status) {
int saved_errno = errno;
target__strerror(target, status, errbuf, BUFSIZ);
ui__error("%s\n", errbuf);
status = -saved_errno;
goto out_delete_evlist;
}
if (target__none(target))
target->system_wide = true;
if (evlist__create_maps(top.evlist, target) < 0) {
ui__error("Couldn't create thread/CPU maps: %s\n",
errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
status = -errno;
goto out_delete_evlist;
}
if (top.delay_secs < 1)
top.delay_secs = 1;
if (record_opts__config(opts)) {
status = -EINVAL;
goto out_delete_evlist;
}
top.sym_evsel = evlist__first(top.evlist);
if (!callchain_param.enabled) {
symbol_conf.cumulate_callchain = false;
perf_hpp__cancel_cumulate();
}
if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
callchain_param.order = ORDER_CALLER;
status = symbol__annotation_init();
if (status < 0)
goto out_delete_evlist;
annotation_config__init(&top.annotation_opts);
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
status = symbol__init(NULL);
if (status < 0)
goto out_delete_evlist;
sort__setup_elide(stdout);
get_term_dimensions(&top.winsize);
if (top.print_entries == 0) {
perf_top__update_print_entries(&top);
signal(SIGWINCH, winch_sig);
}
top.session = perf_session__new(NULL, NULL);
if (IS_ERR(top.session)) {
status = PTR_ERR(top.session);
top.session = NULL;
goto out_delete_evlist;
}
#ifdef HAVE_LIBBPF_SUPPORT
if (!top.record_opts.no_bpf_event) {
top.sb_evlist = evlist__new();
if (top.sb_evlist == NULL) {
pr_err("Couldn't create side band evlist.\n.");
status = -EINVAL;
goto out_delete_evlist;
}
if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
status = -EINVAL;
goto out_delete_evlist;
}
}
#endif
if (evlist__start_sb_thread(top.sb_evlist, target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
opts->no_bpf_event = true;
}
status = __cmd_top(&top);
if (!opts->no_bpf_event)
evlist__stop_sb_thread(top.sb_evlist);
out_delete_evlist:
evlist__delete(top.evlist);
perf_session__delete(top.session);
annotation_options__exit(&top.annotation_opts);
return status;
}
| linux-master | tools/perf/builtin-top.c |
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"
#include "util/build-id.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/mmap.h"
#include "util/term.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/intlist.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/tool.h"
#include "util/stat.h"
#include "util/synthetic-events.h"
#include "util/top.h"
#include "util/data.h"
#include "util/ordered-events.h"
#include "util/kvm-stat.h"
#include "util/util.h"
#include "ui/browsers/hists.h"
#include "ui/progress.h"
#include "ui/ui.h"
#include "util/string2.h"
#include <sys/prctl.h>
#ifdef HAVE_TIMERFD_SUPPORT
#include <sys/timerfd.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <termios.h>
#include <semaphore.h>
#include <signal.h>
#include <math.h>
#include <perf/mmap.h>
#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
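/*
 * GET_EVENT_KEY() generates a per-field accessor that returns either the
 * aggregate value (vcpu == -1) or the per-vcpu value, and
 * COMPARE_EVENT_KEY() additionally generates a comparator on top of it.
 * For example, COMPARE_EVENT_KEY(time, time) expands to get_event_time()
 * plus cmp_event_time().
 */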
#define GET_EVENT_KEY(func, field) \
static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
{ \
if (vcpu == -1) \
return event->total.field; \
\
if (vcpu >= event->max_vcpu) \
return 0; \
\
return event->vcpu[vcpu].field; \
}
#define COMPARE_EVENT_KEY(func, field) \
GET_EVENT_KEY(func, field) \
static int64_t cmp_event_ ## func(struct kvm_event *one, \
struct kvm_event *two, int vcpu) \
{ \
return get_event_ ##func(one, vcpu) - \
get_event_ ##func(two, vcpu); \
}
COMPARE_EVENT_KEY(time, time);
COMPARE_EVENT_KEY(max, stats.max);
COMPARE_EVENT_KEY(min, stats.min);
COMPARE_EVENT_KEY(count, stats.n);
COMPARE_EVENT_KEY(mean, stats.mean);
struct kvm_hists {
struct hists hists;
struct perf_hpp_list list;
};
struct kvm_dimension {
const char *name;
const char *header;
int width;
int64_t (*cmp)(struct perf_hpp_fmt *fmt, struct hist_entry *left,
struct hist_entry *right);
int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he);
};
struct kvm_fmt {
struct perf_hpp_fmt fmt;
struct kvm_dimension *dim;
};
static struct kvm_hists kvm_hists;
static int64_t ev_name_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left,
struct hist_entry *right)
{
/* Return opposite number for sorting in alphabetical order */
return -strcmp(left->kvm_info->name, right->kvm_info->name);
}
static int fmt_width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct hists *hists __maybe_unused);
static int ev_name_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = fmt_width(fmt, hpp, he->hists);
return scnprintf(hpp->buf, hpp->size, "%*s", width, he->kvm_info->name);
}
static struct kvm_dimension dim_event = {
.header = "Event name",
.name = "ev_name",
.cmp = ev_name_cmp,
.entry = ev_name_entry,
.width = 40,
};
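/*
 * The EV_METRIC_* macros below stamp out hist_entry comparators and
 * column renderers for each metric, delegating to the get_event_*()
 * accessors with the vcpu currently being traced.
 */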
#define EV_METRIC_CMP(metric) \
static int64_t ev_cmp_##metric(struct perf_hpp_fmt *fmt __maybe_unused, \
struct hist_entry *left, \
struct hist_entry *right) \
{ \
struct kvm_event *event_left; \
struct kvm_event *event_right; \
struct perf_kvm_stat *perf_kvm; \
\
event_left = container_of(left, struct kvm_event, he); \
event_right = container_of(right, struct kvm_event, he); \
\
perf_kvm = event_left->perf_kvm; \
return cmp_event_##metric(event_left, event_right, \
perf_kvm->trace_vcpu); \
}
EV_METRIC_CMP(time)
EV_METRIC_CMP(count)
EV_METRIC_CMP(max)
EV_METRIC_CMP(min)
EV_METRIC_CMP(mean)
#define EV_METRIC_ENTRY(metric) \
static int ev_entry_##metric(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
struct kvm_event *event; \
int width = fmt_width(fmt, hpp, he->hists); \
struct perf_kvm_stat *perf_kvm; \
\
event = container_of(he, struct kvm_event, he); \
perf_kvm = event->perf_kvm; \
return scnprintf(hpp->buf, hpp->size, "%*lu", width, \
get_event_##metric(event, perf_kvm->trace_vcpu)); \
}
EV_METRIC_ENTRY(time)
EV_METRIC_ENTRY(count)
EV_METRIC_ENTRY(max)
EV_METRIC_ENTRY(min)
static struct kvm_dimension dim_time = {
.header = "Time (ns)",
.name = "time",
.cmp = ev_cmp_time,
.entry = ev_entry_time,
.width = 12,
};
static struct kvm_dimension dim_count = {
.header = "Samples",
.name = "sample",
.cmp = ev_cmp_count,
.entry = ev_entry_count,
.width = 12,
};
static struct kvm_dimension dim_max_time = {
.header = "Max Time (ns)",
.name = "max_t",
.cmp = ev_cmp_max,
.entry = ev_entry_max,
.width = 14,
};
static struct kvm_dimension dim_min_time = {
.header = "Min Time (ns)",
.name = "min_t",
.cmp = ev_cmp_min,
.entry = ev_entry_min,
.width = 14,
};
static int ev_entry_mean(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct kvm_event *event;
int width = fmt_width(fmt, hpp, he->hists);
struct perf_kvm_stat *perf_kvm;
event = container_of(he, struct kvm_event, he);
perf_kvm = event->perf_kvm;
return scnprintf(hpp->buf, hpp->size, "%*lu", width,
get_event_mean(event, perf_kvm->trace_vcpu));
}
static struct kvm_dimension dim_mean_time = {
.header = "Mean Time (ns)",
.name = "mean_t",
.cmp = ev_cmp_mean,
.entry = ev_entry_mean,
.width = 14,
};
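/*
 * PERC_STR() formats a percentage into the caller-provided buffer and
 * evaluates to that buffer, so it can be used directly as a printf
 * argument.
 */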
#define PERC_STR(__s, __v) \
({ \
scnprintf(__s, sizeof(__s), "%.2F%%", __v); \
__s; \
})
static double percent(u64 st, u64 tot)
{
return tot ? 100. * (double) st / (double) tot : 0;
}
#define EV_METRIC_PERCENT(metric) \
static int ev_percent_##metric(struct hist_entry *he) \
{ \
struct kvm_event *event; \
struct perf_kvm_stat *perf_kvm; \
\
event = container_of(he, struct kvm_event, he); \
perf_kvm = event->perf_kvm; \
\
return percent(get_event_##metric(event, perf_kvm->trace_vcpu), \
perf_kvm->total_##metric); \
}
EV_METRIC_PERCENT(time)
EV_METRIC_PERCENT(count)
static int ev_entry_time_percent(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = fmt_width(fmt, hpp, he->hists);
double per;
char buf[10];
per = ev_percent_time(he);
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int64_t
ev_cmp_time_percent(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
double per_left;
double per_right;
per_left = ev_percent_time(left);
per_right = ev_percent_time(right);
return per_left - per_right;
}
static struct kvm_dimension dim_time_percent = {
.header = "Time%",
.name = "percent_time",
.cmp = ev_cmp_time_percent,
.entry = ev_entry_time_percent,
.width = 12,
};
static int ev_entry_count_percent(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp,
struct hist_entry *he)
{
int width = fmt_width(fmt, hpp, he->hists);
double per;
char buf[10];
per = ev_percent_count(he);
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int64_t
ev_cmp_count_percent(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
double per_left;
double per_right;
per_left = ev_percent_count(left);
per_right = ev_percent_count(right);
return per_left - per_right;
}
static struct kvm_dimension dim_count_percent = {
.header = "Sample%",
.name = "percent_sample",
.cmp = ev_cmp_count_percent,
.entry = ev_entry_count_percent,
.width = 12,
};
static struct kvm_dimension *dimensions[] = {
&dim_event,
&dim_time,
&dim_time_percent,
&dim_count,
&dim_count_percent,
&dim_max_time,
&dim_min_time,
&dim_mean_time,
NULL,
};
static int fmt_width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct hists *hists __maybe_unused)
{
struct kvm_fmt *kvm_fmt;
kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
return kvm_fmt->dim->width;
}
static int fmt_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hists *hists, int line __maybe_unused,
int *span __maybe_unused)
{
struct kvm_fmt *kvm_fmt;
struct kvm_dimension *dim;
int width = fmt_width(fmt, hpp, hists);
kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
dim = kvm_fmt->dim;
return scnprintf(hpp->buf, hpp->size, "%*s", width, dim->header);
}
static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
struct kvm_fmt *kvm_fmt_a = container_of(a, struct kvm_fmt, fmt);
struct kvm_fmt *kvm_fmt_b = container_of(b, struct kvm_fmt, fmt);
return kvm_fmt_a->dim == kvm_fmt_b->dim;
}
static void fmt_free(struct perf_hpp_fmt *fmt)
{
struct kvm_fmt *kvm_fmt;
kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
free(kvm_fmt);
}
static struct kvm_dimension *get_dimension(const char *name)
{
unsigned int i;
for (i = 0; dimensions[i] != NULL; i++) {
if (!strcmp(dimensions[i]->name, name))
return dimensions[i];
}
return NULL;
}
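/*
 * Wrap the named dimension in a freshly allocated perf_hpp_fmt so it can
 * be registered as an output column and/or sort key on a hpp list.
 */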
static struct kvm_fmt *get_format(const char *name)
{
struct kvm_dimension *dim = get_dimension(name);
struct kvm_fmt *kvm_fmt;
struct perf_hpp_fmt *fmt;
if (!dim)
return NULL;
kvm_fmt = zalloc(sizeof(*kvm_fmt));
if (!kvm_fmt)
return NULL;
kvm_fmt->dim = dim;
fmt = &kvm_fmt->fmt;
INIT_LIST_HEAD(&fmt->list);
INIT_LIST_HEAD(&fmt->sort_list);
fmt->cmp = dim->cmp;
fmt->sort = dim->cmp;
fmt->color = NULL;
fmt->entry = dim->entry;
fmt->header = fmt_header;
fmt->width = fmt_width;
fmt->collapse = dim->cmp;
fmt->equal = fmt_equal;
fmt->free = fmt_free;
return kvm_fmt;
}
static int kvm_hists__init_output(struct perf_hpp_list *hpp_list, char *name)
{
struct kvm_fmt *kvm_fmt = get_format(name);
if (!kvm_fmt) {
pr_warning("Fail to find format for output field %s.\n", name);
return -EINVAL;
}
perf_hpp_list__column_register(hpp_list, &kvm_fmt->fmt);
return 0;
}
static int kvm_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
{
struct kvm_fmt *kvm_fmt = get_format(name);
if (!kvm_fmt) {
pr_warning("Fail to find format for sorting %s.\n", name);
return -EINVAL;
}
perf_hpp_list__register_sort_field(hpp_list, &kvm_fmt->fmt);
return 0;
}
static int kvm_hpp_list__init(char *list,
struct perf_hpp_list *hpp_list,
int (*fn)(struct perf_hpp_list *hpp_list,
char *name))
{
char *tmp, *tok;
int ret;
if (!list || !fn)
return 0;
for (tok = strtok_r(list, ", ", &tmp); tok;
tok = strtok_r(NULL, ", ", &tmp)) {
ret = fn(hpp_list, tok);
if (!ret)
continue;
/* Handle errors */
if (ret == -EINVAL)
pr_err("Invalid field key: '%s'\n", tok);
else if (ret == -ESRCH)
pr_err("Unknown field key: '%s'\n", tok);
else
pr_err("Failed to initialize field key: '%s'\n", tok);
break;
}
return ret;
}
static int kvm_hpp_list__parse(struct perf_hpp_list *hpp_list,
const char *output_, const char *sort_)
{
char *output = output_ ? strdup(output_) : NULL;
char *sort = sort_ ? strdup(sort_) : NULL;
int ret;
ret = kvm_hpp_list__init(output, hpp_list, kvm_hists__init_output);
if (ret)
goto out;
ret = kvm_hpp_list__init(sort, hpp_list, kvm_hists__init_sort);
if (ret)
goto out;
/* Copy sort keys to output fields */
perf_hpp__setup_output_field(hpp_list);
/* and then copy output fields to sort keys */
perf_hpp__append_sort_keys(hpp_list);
out:
free(output);
free(sort);
return ret;
}
static int kvm_hists__init(void)
{
kvm_hists.list.nr_header_lines = 1;
__hists__init(&kvm_hists.hists, &kvm_hists.list);
perf_hpp_list__init(&kvm_hists.list);
return kvm_hpp_list__parse(&kvm_hists.list, NULL, "ev_name");
}
static int kvm_hists__reinit(const char *output, const char *sort)
{
perf_hpp__reset_output_field(&kvm_hists.list);
return kvm_hpp_list__parse(&kvm_hists.list, output, sort);
}
static void print_result(struct perf_kvm_stat *kvm);
#ifdef HAVE_SLANG_SUPPORT
static void kvm_browser__update_nr_entries(struct hist_browser *hb)
{
struct rb_node *nd = rb_first_cached(&hb->hists->entries);
u64 nr_entries = 0;
for (; nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry,
rb_node);
if (!he->filtered)
nr_entries++;
}
hb->nr_non_filtered_entries = nr_entries;
}
static int kvm_browser__title(struct hist_browser *browser,
char *buf, size_t size)
{
scnprintf(buf, size, "KVM event statistics (%lu entries)",
browser->nr_non_filtered_entries);
return 0;
}
static struct hist_browser*
perf_kvm_browser__new(struct hists *hists)
{
struct hist_browser *browser = hist_browser__new(hists);
if (browser)
browser->title = kvm_browser__title;
return browser;
}
static int kvm__hists_browse(struct hists *hists)
{
struct hist_browser *browser;
int key = -1;
browser = perf_kvm_browser__new(hists);
if (browser == NULL)
return -1;
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
kvm_browser__update_nr_entries(browser);
while (1) {
key = hist_browser__run(browser, "? - help", true, 0);
switch (key) {
case 'q':
goto out;
default:
break;
}
}
out:
hist_browser__delete(browser);
return 0;
}
static void kvm_display(struct perf_kvm_stat *kvm)
{
if (!use_browser)
print_result(kvm);
else
kvm__hists_browse(&kvm_hists.hists);
}
#else
static void kvm_display(struct perf_kvm_stat *kvm)
{
use_browser = 0;
print_result(kvm);
}
#endif /* HAVE_SLANG_SUPPORT */
#endif // defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
static const char *get_filename_for_perf_kvm(void)
{
const char *filename;
if (perf_host && !perf_guest)
filename = strdup("perf.data.host");
else if (!perf_host && perf_guest)
filename = strdup("perf.data.guest");
else
filename = strdup("perf.data.kvm");
return filename;
}
#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
void exit_event_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
key->info = 0;
key->key = evsel__intval(evsel, sample, kvm_exit_reason);
}
bool kvm_exit_event(struct evsel *evsel)
{
return evsel__name_is(evsel, kvm_exit_trace);
}
bool exit_event_begin(struct evsel *evsel,
struct perf_sample *sample, struct event_key *key)
{
if (kvm_exit_event(evsel)) {
exit_event_get_key(evsel, sample, key);
return true;
}
return false;
}
bool kvm_entry_event(struct evsel *evsel)
{
return evsel__name_is(evsel, kvm_entry_trace);
}
bool exit_event_end(struct evsel *evsel,
struct perf_sample *sample __maybe_unused,
struct event_key *key __maybe_unused)
{
return kvm_entry_event(evsel);
}
static const char *get_exit_reason(struct perf_kvm_stat *kvm,
struct exit_reasons_table *tbl,
u64 exit_code)
{
while (tbl->reason != NULL) {
if (tbl->exit_code == exit_code)
return tbl->reason;
tbl++;
}
pr_err("unknown kvm exit code:%lld on %s\n",
(unsigned long long)exit_code, kvm->exit_reasons_isa);
return "UNKNOWN";
}
void exit_event_decode_key(struct perf_kvm_stat *kvm,
struct event_key *key,
char *decode)
{
const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
key->key);
scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", exit_reason);
}
static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
{
struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops;
for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) {
if (!strcmp(events_ops->name, kvm->report_event)) {
kvm->events_ops = events_ops->ops;
return true;
}
}
return false;
}
struct vcpu_event_record {
int vcpu_id;
u64 start_time;
struct kvm_event *last_event;
};
#ifdef HAVE_TIMERFD_SUPPORT
static void clear_events_cache_stats(void)
{
struct rb_root_cached *root;
struct rb_node *nd;
struct kvm_event *event;
int i;
if (hists__has(&kvm_hists.hists, need_collapse))
root = &kvm_hists.hists.entries_collapsed;
else
root = kvm_hists.hists.entries_in;
for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
struct hist_entry *he;
he = rb_entry(nd, struct hist_entry, rb_node_in);
event = container_of(he, struct kvm_event, he);
/* reset stats for event */
event->total.time = 0;
init_stats(&event->total.stats);
for (i = 0; i < event->max_vcpu; ++i) {
event->vcpu[i].time = 0;
init_stats(&event->vcpu[i].stats);
}
}
}
#endif
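/*
 * Grow the per-vcpu stats array in DEFAULT_VCPU_NUM-sized steps; the old
 * pointer is saved so it can be freed if realloc() fails, and the newly
 * added slots are zeroed.
 */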
static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
{
int old_max_vcpu = event->max_vcpu;
void *prev;
if (vcpu_id < event->max_vcpu)
return true;
while (event->max_vcpu <= vcpu_id)
event->max_vcpu += DEFAULT_VCPU_NUM;
prev = event->vcpu;
event->vcpu = realloc(event->vcpu,
event->max_vcpu * sizeof(*event->vcpu));
if (!event->vcpu) {
free(prev);
pr_err("Not enough memory\n");
return false;
}
memset(event->vcpu + old_max_vcpu, 0,
(event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
return true;
}
static void *kvm_he_zalloc(size_t size)
{
struct kvm_event *kvm_ev;
kvm_ev = zalloc(size + sizeof(*kvm_ev));
if (!kvm_ev)
return NULL;
init_stats(&kvm_ev->total.stats);
hists__inc_nr_samples(&kvm_hists.hists, 0);
return &kvm_ev->he;
}
static void kvm_he_free(void *he)
{
struct kvm_event *kvm_ev;
kvm_ev = container_of(he, struct kvm_event, he);
free(kvm_ev);
}
static struct hist_entry_ops kvm_ev_entry_ops = {
.new = kvm_he_zalloc,
.free = kvm_he_free,
};
static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
struct event_key *key,
struct perf_sample *sample)
{
struct kvm_event *event;
struct hist_entry *he;
struct kvm_info *ki;
BUG_ON(key->key == INVALID_KEY);
ki = kvm_info__new();
if (!ki) {
pr_err("Failed to allocate kvm info\n");
return NULL;
}
kvm->events_ops->decode_key(kvm, key, ki->name);
he = hists__add_entry_ops(&kvm_hists.hists, &kvm_ev_entry_ops,
&kvm->al, NULL, NULL, NULL, ki, sample, true);
if (he == NULL) {
pr_err("Failed to allocate hist entry\n");
free(ki);
return NULL;
}
event = container_of(he, struct kvm_event, he);
if (!event->perf_kvm) {
event->perf_kvm = kvm;
event->key = *key;
}
return event;
}
static bool handle_begin_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
struct event_key *key,
struct perf_sample *sample)
{
struct kvm_event *event = NULL;
if (key->key != INVALID_KEY)
event = find_create_kvm_event(kvm, key, sample);
vcpu_record->last_event = event;
vcpu_record->start_time = sample->time;
return true;
}
static void
kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
{
kvm_stats->time += time_diff;
update_stats(&kvm_stats->stats, time_diff);
}
static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
{
struct kvm_event_stats *kvm_stats = &event->total;
if (vcpu_id != -1)
kvm_stats = &event->vcpu[vcpu_id];
return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
avg_stats(&kvm_stats->stats));
}
static bool update_kvm_event(struct perf_kvm_stat *kvm,
struct kvm_event *event, int vcpu_id,
u64 time_diff)
{
/* Update overall statistics */
kvm->total_count++;
kvm->total_time += time_diff;
if (vcpu_id == -1) {
kvm_update_event_stats(&event->total, time_diff);
return true;
}
if (!kvm_event_expand(event, vcpu_id))
return false;
kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
return true;
}
static bool is_child_event(struct perf_kvm_stat *kvm,
struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
struct child_event_ops *child_ops;
child_ops = kvm->events_ops->child_ops;
if (!child_ops)
return false;
for (; child_ops->name; child_ops++) {
if (evsel__name_is(evsel, child_ops->name)) {
child_ops->get_key(evsel, sample, key);
return true;
}
}
return false;
}
static bool handle_child_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
struct event_key *key,
struct perf_sample *sample)
{
struct kvm_event *event = NULL;
if (key->key != INVALID_KEY)
event = find_create_kvm_event(kvm, key, sample);
vcpu_record->last_event = event;
return true;
}
static bool skip_event(const char *event)
{
const char * const *skip_events;
for (skip_events = kvm_skip_events; *skip_events; skip_events++)
if (!strcmp(event, *skip_events))
return true;
return false;
}
static bool handle_end_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
struct event_key *key,
struct perf_sample *sample)
{
struct kvm_event *event;
u64 time_begin, time_diff;
int vcpu;
if (kvm->trace_vcpu == -1)
vcpu = -1;
else
vcpu = vcpu_record->vcpu_id;
event = vcpu_record->last_event;
time_begin = vcpu_record->start_time;
/* The begin event is not caught. */
if (!time_begin)
return true;
/*
* In some cases, the 'begin event' only records the start timestamp;
* the actual event is recognized in the 'end event' (e.g. mmio-event).
*/
/* Both begin and end events did not get the key. */
if (!event && key->key == INVALID_KEY)
return true;
if (!event)
event = find_create_kvm_event(kvm, key, sample);
if (!event)
return false;
vcpu_record->last_event = NULL;
vcpu_record->start_time = 0;
/* seems to happen once in a while during live mode */
if (sample->time < time_begin) {
pr_debug("End time before begin time; skipping event.\n");
return true;
}
time_diff = sample->time - time_begin;
if (kvm->duration && time_diff > kvm->duration) {
char decode[KVM_EVENT_NAME_LEN];
kvm->events_ops->decode_key(kvm, &event->key, decode);
if (!skip_event(decode)) {
pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
sample->time, sample->pid, vcpu_record->vcpu_id,
decode, time_diff / NSEC_PER_USEC);
}
}
return update_kvm_event(kvm, event, vcpu, time_diff);
}
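/*
 * Lazily allocate the per-thread vcpu record on the first kvm_entry
 * event and stash it via thread__set_priv() for later lookups.
 */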
static
struct vcpu_event_record *per_vcpu_record(struct thread *thread,
struct evsel *evsel,
struct perf_sample *sample)
{
/* Only kvm_entry records vcpu id. */
if (!thread__priv(thread) && kvm_entry_event(evsel)) {
struct vcpu_event_record *vcpu_record;
vcpu_record = zalloc(sizeof(*vcpu_record));
if (!vcpu_record) {
pr_err("%s: Not enough memory\n", __func__);
return NULL;
}
vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str);
thread__set_priv(thread, vcpu_record);
}
return thread__priv(thread);
}
static bool handle_kvm_event(struct perf_kvm_stat *kvm,
struct thread *thread,
struct evsel *evsel,
struct perf_sample *sample)
{
struct vcpu_event_record *vcpu_record;
struct event_key key = { .key = INVALID_KEY,
.exit_reasons = kvm->exit_reasons };
vcpu_record = per_vcpu_record(thread, evsel, sample);
if (!vcpu_record)
return true;
/* only process events for the vcpus the user cares about */
if ((kvm->trace_vcpu != -1) &&
(kvm->trace_vcpu != vcpu_record->vcpu_id))
return true;
if (kvm->events_ops->is_begin_event(evsel, sample, &key))
return handle_begin_event(kvm, vcpu_record, &key, sample);
if (is_child_event(kvm, evsel, sample, &key))
return handle_child_event(kvm, vcpu_record, &key, sample);
if (kvm->events_ops->is_end_event(evsel, sample, &key))
return handle_end_event(kvm, vcpu_record, &key, sample);
return true;
}
static bool is_valid_key(struct perf_kvm_stat *kvm)
{
static const char *key_array[] = {
"ev_name", "sample", "time", "max_t", "min_t", "mean_t",
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(key_array); i++)
if (!strcmp(key_array[i], kvm->sort_key))
return true;
pr_err("Unsupported sort key: %s\n", kvm->sort_key);
return false;
}
static bool event_is_valid(struct kvm_event *event, int vcpu)
{
return !!get_event_count(event, vcpu);
}
static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct kvm_event *event;
struct perf_kvm_stat *perf_kvm;
event = container_of(he, struct kvm_event, he);
perf_kvm = event->perf_kvm;
if (!event_is_valid(event, perf_kvm->trace_vcpu))
he->filtered = 1;
else
he->filtered = 0;
return 0;
}
static void sort_result(struct perf_kvm_stat *kvm)
{
struct ui_progress prog;
const char *output_columns = "ev_name,sample,percent_sample,"
"time,percent_time,max_t,min_t,mean_t";
kvm_hists__reinit(output_columns, kvm->sort_key);
ui_progress__init(&prog, kvm_hists.hists.nr_entries, "Sorting...");
hists__collapse_resort(&kvm_hists.hists, NULL);
hists__output_resort_cb(&kvm_hists.hists, NULL, filter_cb);
ui_progress__finish();
}
static void print_vcpu_info(struct perf_kvm_stat *kvm)
{
int vcpu = kvm->trace_vcpu;
pr_info("Analyze events for ");
if (kvm->opts.target.system_wide)
pr_info("all VMs, ");
else if (kvm->opts.target.pid)
pr_info("pid(s) %s, ", kvm->opts.target.pid);
else
pr_info("dazed and confused on what is monitored, ");
if (vcpu == -1)
pr_info("all VCPUs:\n\n");
else
pr_info("VCPU %d:\n\n", vcpu);
}
static void show_timeofday(void)
{
char date[64];
struct timeval tv;
struct tm ltime;
gettimeofday(&tv, NULL);
if (localtime_r(&tv.tv_sec, <ime)) {
strftime(date, sizeof(date), "%H:%M:%S", <ime);
pr_info("%s.%06ld", date, tv.tv_usec);
} else
pr_info("00:00:00.000000");
}
static void print_result(struct perf_kvm_stat *kvm)
{
char decode[KVM_EVENT_NAME_LEN];
struct kvm_event *event;
int vcpu = kvm->trace_vcpu;
struct rb_node *nd;
if (kvm->live) {
puts(CONSOLE_CLEAR);
show_timeofday();
}
pr_info("\n\n");
print_vcpu_info(kvm);
pr_info("%*s ", KVM_EVENT_NAME_LEN, kvm->events_ops->name);
pr_info("%10s ", "Samples");
pr_info("%9s ", "Samples%");
pr_info("%9s ", "Time%");
pr_info("%11s ", "Min Time");
pr_info("%11s ", "Max Time");
pr_info("%16s ", "Avg time");
pr_info("\n\n");
for (nd = rb_first_cached(&kvm_hists.hists.entries); nd; nd = rb_next(nd)) {
struct hist_entry *he;
u64 ecount, etime, max, min;
he = rb_entry(nd, struct hist_entry, rb_node);
if (he->filtered)
continue;
event = container_of(he, struct kvm_event, he);
ecount = get_event_count(event, vcpu);
etime = get_event_time(event, vcpu);
max = get_event_max(event, vcpu);
min = get_event_min(event, vcpu);
kvm->events_ops->decode_key(kvm, &event->key, decode);
pr_info("%*s ", KVM_EVENT_NAME_LEN, decode);
pr_info("%10llu ", (unsigned long long)ecount);
pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
pr_info("%9.2fus ", (double)min / NSEC_PER_USEC);
pr_info("%9.2fus ", (double)max / NSEC_PER_USEC);
pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount / NSEC_PER_USEC,
kvm_event_rel_stddev(vcpu, event));
pr_info("\n");
}
pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
kvm->total_count, kvm->total_time / (double)NSEC_PER_USEC);
if (kvm->lost_events)
pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
}
#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
static int process_lost_event(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, tool);
kvm->lost_events++;
return 0;
}
#endif
static bool skip_sample(struct perf_kvm_stat *kvm,
struct perf_sample *sample)
{
if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL)
return true;
return false;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
int err = 0;
struct thread *thread;
struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
tool);
if (skip_sample(kvm, sample))
return 0;
if (machine__resolve(machine, &kvm->al, sample) < 0) {
pr_warning("Fail to resolve address location, skip sample.\n");
return 0;
}
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
if (!handle_kvm_event(kvm, thread, evsel, sample))
err = -1;
thread__put(thread);
return err;
}
static int cpu_isa_config(struct perf_kvm_stat *kvm)
{
char buf[128], *cpuid;
int err;
if (kvm->live) {
err = get_cpuid(buf, sizeof(buf));
if (err != 0) {
pr_err("Failed to look up CPU type: %s\n",
str_error_r(err, buf, sizeof(buf)));
return -err;
}
cpuid = buf;
} else
cpuid = kvm->session->header.env.cpuid;
if (!cpuid) {
pr_err("Failed to look up CPU type\n");
return -EINVAL;
}
err = cpu_isa_init(kvm, cpuid);
if (err == -ENOTSUP)
pr_err("CPU %s is not supported.\n", cpuid);
return err;
}
static bool verify_vcpu(int vcpu)
{
if (vcpu != -1 && vcpu < 0) {
pr_err("Invalid vcpu:%d.\n", vcpu);
return false;
}
return true;
}
#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
/*
 * Keep the max events to a modest level to keep
 * the processing of samples per mmap smooth.
*/
#define PERF_KVM__MAX_EVENTS_PER_MMAP 25
static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
u64 *mmap_time)
{
struct evlist *evlist = kvm->evlist;
union perf_event *event;
struct mmap *md;
u64 timestamp;
s64 n = 0;
int err;
*mmap_time = ULLONG_MAX;
md = &evlist->mmap[idx];
err = perf_mmap__read_init(&md->core);
if (err < 0)
return (err == -EAGAIN) ? 0 : -1;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
err = evlist__parse_sample_timestamp(evlist, event, ×tamp);
if (err) {
perf_mmap__consume(&md->core);
pr_err("Failed to parse sample\n");
return -1;
}
err = perf_session__queue_event(kvm->session, event, timestamp, 0, NULL);
/*
* FIXME: Here we can't consume the event, as perf_session__queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
*/
perf_mmap__consume(&md->core);
if (err) {
pr_err("Failed to enqueue sample: %d\n", err);
return -1;
}
/* save time stamp of our first sample for this mmap */
if (n == 0)
*mmap_time = timestamp;
/* limit events per mmap handled all at once */
n++;
if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
break;
}
perf_mmap__read_done(&md->core);
return n;
}
static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
{
int i, err, throttled = 0;
s64 n, ntotal = 0;
u64 flush_time = ULLONG_MAX, mmap_time;
for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) {
n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
if (n < 0)
return -1;
/* flush time is going to be the minimum of all the individual
* mmap times. Essentially, we flush all the samples queued up
* from the last pass under our minimal start time -- that leaves
* a very small race for samples to come in with a lower timestamp.
* The ioctl to return the perf_clock timestamp should close the
* race entirely.
*/
if (mmap_time < flush_time)
flush_time = mmap_time;
ntotal += n;
if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
throttled = 1;
}
/* flush queue after each round in which we processed events */
if (ntotal) {
struct ordered_events *oe = &kvm->session->ordered_events;
oe->next_flush = flush_time;
err = ordered_events__flush(oe, OE_FLUSH__ROUND);
if (err) {
if (kvm->lost_events)
pr_info("\nLost events: %" PRIu64 "\n\n",
kvm->lost_events);
return err;
}
}
return throttled;
}
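/* Set by the SIGINT/SIGTERM handler below; polled by the live-mode loop. */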
static volatile int done;
static void sig_handler(int sig __maybe_unused)
{
done = 1;
}
static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm)
{
struct itimerspec new_value;
int rc = -1;
kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
if (kvm->timerfd < 0) {
pr_err("timerfd_create failed\n");
goto out;
}
new_value.it_value.tv_sec = kvm->display_time;
new_value.it_value.tv_nsec = 0;
new_value.it_interval.tv_sec = kvm->display_time;
new_value.it_interval.tv_nsec = 0;
if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) {
pr_err("timerfd_settime failed: %d\n", errno);
close(kvm->timerfd);
goto out;
}
rc = 0;
out:
return rc;
}
static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
{
uint64_t c;
int rc;
rc = read(kvm->timerfd, &c, sizeof(uint64_t));
if (rc < 0) {
if (errno == EAGAIN)
return 0;
pr_err("Failed to read timer fd: %d\n", errno);
return -1;
}
if (rc != sizeof(uint64_t)) {
pr_err("Error reading timer fd - invalid size returned\n");
return -1;
}
if (c != 1)
pr_debug("Missed timer beats: %" PRIu64 "\n", c-1);
/* update display */
sort_result(kvm);
print_result(kvm);
/* Reset sort list to "ev_name" */
kvm_hists__reinit(NULL, "ev_name");
/* reset counts */
clear_events_cache_stats();
kvm->total_count = 0;
kvm->total_time = 0;
kvm->lost_events = 0;
return 0;
}
static int fd_set_nonblock(int fd)
{
long arg = 0;
arg = fcntl(fd, F_GETFL);
if (arg < 0) {
pr_err("Failed to get current flags for fd %d\n", fd);
return -1;
}
if (fcntl(fd, F_SETFL, arg | O_NONBLOCK) < 0) {
pr_err("Failed to set non-block option on fd %d\n", fd);
return -1;
}
return 0;
}
static int perf_kvm__handle_stdin(void)
{
int c;
c = getc(stdin);
if (c == 'q')
return 1;
return 0;
}
static int kvm_events_live_report(struct perf_kvm_stat *kvm)
{
int nr_stdin, ret, err = -EINVAL;
struct termios save;
/* live flag must be set first */
kvm->live = true;
ret = cpu_isa_config(kvm);
if (ret < 0)
return ret;
if (!verify_vcpu(kvm->trace_vcpu) ||
!is_valid_key(kvm) ||
!register_kvm_events_ops(kvm)) {
goto out;
}
set_term_quiet_input(&save);
kvm_hists__init();
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
/* add timer fd */
if (perf_kvm__timerfd_create(kvm) < 0) {
err = -1;
goto out;
}
if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0)
goto out;
nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin));
if (nr_stdin < 0)
goto out;
if (fd_set_nonblock(fileno(stdin)) != 0)
goto out;
/* everything is good - enable the events and process */
evlist__enable(kvm->evlist);
while (!done) {
struct fdarray *fda = &kvm->evlist->core.pollfd;
int rc;
rc = perf_kvm__mmap_read(kvm);
if (rc < 0)
break;
err = perf_kvm__handle_timerfd(kvm);
if (err)
goto out;
if (fda->entries[nr_stdin].revents & POLLIN)
done = perf_kvm__handle_stdin();
if (!rc && !done)
err = evlist__poll(kvm->evlist, 100);
}
evlist__disable(kvm->evlist);
if (err == 0) {
sort_result(kvm);
print_result(kvm);
}
out:
hists__delete_entries(&kvm_hists.hists);
if (kvm->timerfd >= 0)
close(kvm->timerfd);
tcsetattr(0, TCSAFLUSH, &save);
return err;
}
static int kvm_live_open_events(struct perf_kvm_stat *kvm)
{
int err, rc = -1;
struct evsel *pos;
struct evlist *evlist = kvm->evlist;
char sbuf[STRERR_BUFSIZE];
evlist__config(evlist, &kvm->opts, NULL);
/*
* Note: exclude_{guest,host} do not apply here.
* This command processes KVM tracepoints from the host only.
*/
evlist__for_each_entry(evlist, pos) {
struct perf_event_attr *attr = &pos->core.attr;
/* make sure these *are* set */
evsel__set_sample_bit(pos, TID);
evsel__set_sample_bit(pos, TIME);
evsel__set_sample_bit(pos, CPU);
evsel__set_sample_bit(pos, RAW);
/* make sure these are *not*; want as small a sample as possible */
evsel__reset_sample_bit(pos, PERIOD);
evsel__reset_sample_bit(pos, IP);
evsel__reset_sample_bit(pos, CALLCHAIN);
evsel__reset_sample_bit(pos, ADDR);
evsel__reset_sample_bit(pos, READ);
attr->mmap = 0;
attr->comm = 0;
attr->task = 0;
attr->sample_period = 1;
attr->watermark = 0;
attr->wakeup_events = 1000;
/* will enable all once we are ready */
attr->disabled = 1;
}
err = evlist__open(evlist);
if (err < 0) {
printf("Couldn't create the events: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out;
}
if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
ui__error("Failed to mmap the events: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
evlist__close(evlist);
goto out;
}
rc = 0;
out:
return rc;
}
#endif
static int read_events(struct perf_kvm_stat *kvm)
{
int ret;
struct perf_tool eops = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
.namespaces = perf_event__process_namespaces,
.ordered_events = true,
};
struct perf_data file = {
.path = kvm->file_name,
.mode = PERF_DATA_MODE_READ,
.force = kvm->force,
};
kvm->tool = eops;
kvm->session = perf_session__new(&file, &kvm->tool);
if (IS_ERR(kvm->session)) {
pr_err("Initializing perf session failed\n");
return PTR_ERR(kvm->session);
}
symbol__init(&kvm->session->header.env);
if (!perf_session__has_traces(kvm->session, "kvm record")) {
ret = -EINVAL;
goto out_delete;
}
/*
* Do not use 'isa' recorded in the kvm_exit tracepoint since it is
* not traced by older kernels.
*/
ret = cpu_isa_config(kvm);
if (ret < 0)
goto out_delete;
ret = perf_session__process_events(kvm->session);
out_delete:
perf_session__delete(kvm->session);
return ret;
}
static int parse_target_str(struct perf_kvm_stat *kvm)
{
if (kvm->opts.target.pid) {
kvm->pid_list = intlist__new(kvm->opts.target.pid);
if (kvm->pid_list == NULL) {
pr_err("Error parsing process id string\n");
return -EINVAL;
}
}
return 0;
}
static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
{
int ret = -EINVAL;
int vcpu = kvm->trace_vcpu;
if (parse_target_str(kvm) != 0)
goto exit;
if (!verify_vcpu(vcpu))
goto exit;
if (!is_valid_key(kvm))
goto exit;
if (!register_kvm_events_ops(kvm))
goto exit;
if (kvm->use_stdio) {
use_browser = 0;
setup_pager();
} else {
use_browser = 1;
}
setup_browser(false);
kvm_hists__init();
ret = read_events(kvm);
if (ret)
goto exit;
sort_result(kvm);
kvm_display(kvm);
exit:
hists__delete_entries(&kvm_hists.hists);
return ret;
}
#define STRDUP_FAIL_EXIT(s) \
({ char *_p; \
_p = strdup(s); \
if (!_p) \
return -ENOMEM; \
_p; \
})
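/*
* Note: STRDUP_FAIL_EXIT relies on the GNU C statement-expression
* extension ({ ... }); the "return -ENOMEM" inside it returns from the
* function that expands the macro, not just from the macro body.
*/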
int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused)
{
return 0;
}
static int
kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
unsigned int rec_argc, i, j, events_tp_size;
const char **rec_argv;
const char * const record_args[] = {
"record",
"-R",
"-m", "1024",
"-c", "1",
};
const char * const kvm_stat_record_usage[] = {
"perf kvm stat record [<options>]",
NULL
};
const char * const *events_tp;
int ret;
events_tp_size = 0;
ret = setup_kvm_events_tp(kvm);
if (ret < 0) {
pr_err("Unable to setup the kvm tracepoints\n");
return ret;
}
for (events_tp = kvm_events_tp; *events_tp; events_tp++)
events_tp_size++;
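/*
* Sizing sketch for rec_argv below: the fixed record_args, one "-e"
* plus tracepoint name per event (hence 2 * events_tp_size), "-o" and
* the output file name (the "+ 2"), and the caller's own arguments.
*/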
rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
2 * events_tp_size;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
for (j = 0; j < events_tp_size; j++) {
rec_argv[i++] = "-e";
rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
}
rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
set_option_flag(record_options, 'e', "event", PARSE_OPT_HIDDEN);
set_option_flag(record_options, 0, "filter", PARSE_OPT_HIDDEN);
set_option_flag(record_options, 'R', "raw-samples", PARSE_OPT_HIDDEN);
set_option_flag(record_options, 'F', "freq", PARSE_OPT_DISABLED);
set_option_flag(record_options, 0, "group", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'g', NULL, PARSE_OPT_DISABLED);
set_option_flag(record_options, 0, "call-graph", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'd', "data", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'T', "timestamp", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'P', "period", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'n', "no-samples", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'N', "no-buildid-cache", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'B', "no-buildid", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'G', "cgroup", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'b', "branch-any", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'j', "branch-filter", PARSE_OPT_DISABLED);
set_option_flag(record_options, 'W', "weight", PARSE_OPT_DISABLED);
set_option_flag(record_options, 0, "transaction", PARSE_OPT_DISABLED);
record_usage = kvm_stat_record_usage;
return cmd_record(i, rec_argv);
}
static int
kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
const struct option kvm_events_report_options[] = {
OPT_STRING(0, "event", &kvm->report_event, "report event",
"event for reporting: vmexit, "
"mmio (x86 only), ioport (x86 only)"),
OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
"vcpu id to report"),
OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
"key for sorting: sample(sort by samples number)"
" time (sort by avg time)"),
OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
"analyze events only for given process id(s)"),
OPT_BOOLEAN('f', "force", &kvm->force, "don't complain, do it"),
OPT_BOOLEAN(0, "stdio", &kvm->use_stdio, "use the stdio interface"),
OPT_END()
};
const char * const kvm_events_report_usage[] = {
"perf kvm stat report [<options>]",
NULL
};
if (argc) {
argc = parse_options(argc, argv,
kvm_events_report_options,
kvm_events_report_usage, 0);
if (argc)
usage_with_options(kvm_events_report_usage,
kvm_events_report_options);
}
#ifndef HAVE_SLANG_SUPPORT
kvm->use_stdio = true;
#endif
if (!kvm->opts.target.pid)
kvm->opts.target.system_wide = true;
return kvm_events_report_vcpu(kvm);
}
#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
static struct evlist *kvm_live_event_list(void)
{
struct evlist *evlist;
char *tp, *name, *sys;
int err = -1;
const char * const *events_tp;
evlist = evlist__new();
if (evlist == NULL)
return NULL;
for (events_tp = kvm_events_tp; *events_tp; events_tp++) {
tp = strdup(*events_tp);
if (tp == NULL)
goto out;
/* split tracepoint into subsystem and name */
sys = tp;
name = strchr(tp, ':');
if (name == NULL) {
pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
*events_tp);
free(tp);
goto out;
}
*name = '\0';
name++;
if (evlist__add_newtp(evlist, sys, name, NULL)) {
pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
free(tp);
goto out;
}
free(tp);
}
err = 0;
out:
if (err) {
evlist__delete(evlist);
evlist = NULL;
}
return evlist;
}
static int kvm_events_live(struct perf_kvm_stat *kvm,
int argc, const char **argv)
{
char errbuf[BUFSIZ];
int err;
const struct option live_options[] = {
OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
"record events on existing process id"),
OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages",
"number of mmap data pages", evlist__parse_mmap_pages),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
"system-wide collection from all CPUs"),
OPT_UINTEGER('d', "display", &kvm->display_time,
"time in seconds between display updates"),
OPT_STRING(0, "event", &kvm->report_event, "report event",
"event for reporting: "
"vmexit, mmio (x86 only), ioport (x86 only)"),
OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
"vcpu id to report"),
OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
"key for sorting: sample(sort by samples number)"
" time (sort by avg time)"),
OPT_U64(0, "duration", &kvm->duration,
"show events other than"
" HLT (x86 only) or Wait state (s390 only)"
" that take longer than duration usecs"),
OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_END()
};
const char * const live_usage[] = {
"perf kvm stat live [<options>]",
NULL
};
struct perf_data data = {
.mode = PERF_DATA_MODE_WRITE,
};
/* event handling */
kvm->tool.sample = process_sample_event;
kvm->tool.comm = perf_event__process_comm;
kvm->tool.exit = perf_event__process_exit;
kvm->tool.fork = perf_event__process_fork;
kvm->tool.lost = process_lost_event;
kvm->tool.namespaces = perf_event__process_namespaces;
kvm->tool.ordered_events = true;
perf_tool__fill_defaults(&kvm->tool);
/* set defaults */
kvm->display_time = 1;
kvm->opts.user_interval = 1;
kvm->opts.mmap_pages = 512;
kvm->opts.target.uses_mmap = false;
kvm->opts.target.uid_str = NULL;
kvm->opts.target.uid = UINT_MAX;
symbol__init(NULL);
disable_buildid_cache();
use_browser = 0;
if (argc) {
argc = parse_options(argc, argv, live_options,
live_usage, 0);
if (argc)
usage_with_options(live_usage, live_options);
}
kvm->duration *= NSEC_PER_USEC; /* convert usec to nsec */
/*
* target related setups
*/
err = target__validate(&kvm->opts.target);
if (err) {
target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
ui__warning("%s", errbuf);
}
if (target__none(&kvm->opts.target))
kvm->opts.target.system_wide = true;
/*
* generate the event list
*/
err = setup_kvm_events_tp(kvm);
if (err < 0) {
pr_err("Unable to setup the kvm tracepoints\n");
return err;
}
kvm->evlist = kvm_live_event_list();
if (kvm->evlist == NULL) {
err = -1;
goto out;
}
if (evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
usage_with_options(live_usage, live_options);
/*
* perf session
*/
kvm->session = perf_session__new(&data, &kvm->tool);
if (IS_ERR(kvm->session)) {
err = PTR_ERR(kvm->session);
goto out;
}
kvm->session->evlist = kvm->evlist;
perf_session__set_id_hdr_size(kvm->session);
ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
kvm->evlist->core.threads, true, false, 1);
err = kvm_live_open_events(kvm);
if (err)
goto out;
err = kvm_events_live_report(kvm);
out:
perf_session__delete(kvm->session);
kvm->session = NULL;
evlist__delete(kvm->evlist);
return err;
}
#endif
static void print_kvm_stat_usage(void)
{
printf("Usage: perf kvm stat <command>\n\n");
printf("# Available commands:\n");
printf("\trecord: record kvm events\n");
printf("\treport: report statistical data of kvm events\n");
printf("\tlive: live reporting of statistical data of kvm events\n");
printf("\nOtherwise, it is the alias of 'perf stat':\n");
}
static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
{
struct perf_kvm_stat kvm = {
.file_name = file_name,
.trace_vcpu = -1,
.report_event = "vmexit",
.sort_key = "sample",
};
if (argc == 1) {
print_kvm_stat_usage();
goto perf_stat;
}
if (strlen(argv[1]) > 2 && strstarts("record", argv[1]))
return kvm_events_record(&kvm, argc - 1, argv + 1);
if (strlen(argv[1]) > 2 && strstarts("report", argv[1]))
return kvm_events_report(&kvm, argc - 1 , argv + 1);
#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
if (!strncmp(argv[1], "live", 4))
return kvm_events_live(&kvm, argc - 1 , argv + 1);
#endif
perf_stat:
return cmd_stat(argc, argv);
}
#endif /* HAVE_KVM_STAT_SUPPORT */
int __weak kvm_add_default_arch_event(int *argc __maybe_unused,
const char **argv __maybe_unused)
{
return 0;
}
static int __cmd_record(const char *file_name, int argc, const char **argv)
{
int rec_argc, i = 0, j, ret;
const char **rec_argv;
ret = kvm_add_default_arch_event(&argc, argv);
if (ret)
return -EINVAL;
rec_argc = argc + 2;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
rec_argv[i++] = strdup("record");
rec_argv[i++] = strdup("-o");
rec_argv[i++] = strdup(file_name);
for (j = 1; j < argc; j++, i++)
rec_argv[i] = argv[j];
BUG_ON(i != rec_argc);
return cmd_record(i, rec_argv);
}
static int __cmd_report(const char *file_name, int argc, const char **argv)
{
int rec_argc, i = 0, j;
const char **rec_argv;
rec_argc = argc + 2;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
rec_argv[i++] = strdup("report");
rec_argv[i++] = strdup("-i");
rec_argv[i++] = strdup(file_name);
for (j = 1; j < argc; j++, i++)
rec_argv[i] = argv[j];
BUG_ON(i != rec_argc);
return cmd_report(i, rec_argv);
}
static int
__cmd_buildid_list(const char *file_name, int argc, const char **argv)
{
int rec_argc, i = 0, j;
const char **rec_argv;
rec_argc = argc + 2;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
rec_argv[i++] = strdup("buildid-list");
rec_argv[i++] = strdup("-i");
rec_argv[i++] = strdup(file_name);
for (j = 1; j < argc; j++, i++)
rec_argv[i] = argv[j];
BUG_ON(i != rec_argc);
return cmd_buildid_list(i, rec_argv);
}
int cmd_kvm(int argc, const char **argv)
{
const char *file_name = NULL;
const struct option kvm_options[] = {
OPT_STRING('i', "input", &file_name, "file",
"Input file name"),
OPT_STRING('o', "output", &file_name, "file",
"Output file name"),
OPT_BOOLEAN(0, "guest", &perf_guest,
"Collect guest os data"),
OPT_BOOLEAN(0, "host", &perf_host,
"Collect host os data"),
OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
"guest mount directory under which every guest os"
" instance has a subdir"),
OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
"file", "file saving guest os vmlinux"),
OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
"file", "file saving guest os /proc/kallsyms"),
OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
"file", "file saving guest os /proc/modules"),
OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
"Guest code can be found in hypervisor process"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_END()
};
const char *const kvm_subcommands[] = { "top", "record", "report", "diff",
"buildid-list", "stat", NULL };
const char *kvm_usage[] = { NULL, NULL };
perf_host = 0;
perf_guest = 1;
argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(kvm_usage, kvm_options);
if (!perf_host)
perf_guest = 1;
if (!file_name) {
file_name = get_filename_for_perf_kvm();
if (!file_name) {
pr_err("Failed to allocate memory for filename\n");
return -ENOMEM;
}
}
if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
return __cmd_record(file_name, argc, argv);
else if (strlen(argv[0]) > 2 && strstarts("report", argv[0]))
return __cmd_report(file_name, argc, argv);
else if (strlen(argv[0]) > 2 && strstarts("diff", argv[0]))
return cmd_diff(argc, argv);
else if (!strcmp(argv[0], "top"))
return cmd_top(argc, argv);
else if (strlen(argv[0]) > 2 && strstarts("buildid-list", argv[0]))
return __cmd_buildid_list(file_name, argc, argv);
#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
else if (strlen(argv[0]) > 2 && strstarts("stat", argv[0]))
return kvm_cmd_stat(file_name, argc, argv);
#endif
else
usage_with_options(kvm_usage, kvm_options);
return 0;
}
| linux-master | tools/perf/builtin-kvm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-bench.c
*
* General benchmarking collections provided by perf
*
* Copyright (C) 2009, Hitoshi Mitake <[email protected]>
*/
/*
* Available benchmark collection list:
*
* sched ... scheduler and IPC performance
* syscall ... System call performance
* mem ... memory access performance
* numa ... NUMA scheduling and MM performance
* futex ... Futex performance
* epoll ... Event poll performance
*/
#include <subcmd/parse-options.h>
#include "builtin.h"
#include "bench/bench.h"
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/zalloc.h>
typedef int (*bench_fn_t)(int argc, const char **argv);
struct bench {
const char *name;
const char *summary;
bench_fn_t fn;
};
#ifdef HAVE_LIBNUMA_SUPPORT
static struct bench numa_benchmarks[] = {
{ "mem", "Benchmark for NUMA workloads", bench_numa },
{ "all", "Run all NUMA benchmarks", NULL },
{ NULL, NULL, NULL }
};
#endif
static struct bench sched_benchmarks[] = {
{ "messaging", "Benchmark for scheduling and IPC", bench_sched_messaging },
{ "pipe", "Benchmark for pipe() between two processes", bench_sched_pipe },
{ "seccomp-notify", "Benchmark for seccomp user notify", bench_sched_seccomp_notify},
{ "all", "Run all scheduler benchmarks", NULL },
{ NULL, NULL, NULL }
};
static struct bench syscall_benchmarks[] = {
{ "basic", "Benchmark for basic getppid(2) calls", bench_syscall_basic },
{ "getpgid", "Benchmark for getpgid(2) calls", bench_syscall_getpgid },
{ "fork", "Benchmark for fork(2) calls", bench_syscall_fork },
{ "execve", "Benchmark for execve(2) calls", bench_syscall_execve },
{ "all", "Run all syscall benchmarks", NULL },
{ NULL, NULL, NULL },
};
static struct bench mem_benchmarks[] = {
{ "memcpy", "Benchmark for memcpy() functions", bench_mem_memcpy },
{ "memset", "Benchmark for memset() functions", bench_mem_memset },
{ "find_bit", "Benchmark for find_bit() functions", bench_mem_find_bit },
{ "all", "Run all memory access benchmarks", NULL },
{ NULL, NULL, NULL }
};
static struct bench futex_benchmarks[] = {
{ "hash", "Benchmark for futex hash table", bench_futex_hash },
{ "wake", "Benchmark for futex wake calls", bench_futex_wake },
{ "wake-parallel", "Benchmark for parallel futex wake calls", bench_futex_wake_parallel },
{ "requeue", "Benchmark for futex requeue calls", bench_futex_requeue },
/* pi-futexes */
{ "lock-pi", "Benchmark for futex lock_pi calls", bench_futex_lock_pi },
{ "all", "Run all futex benchmarks", NULL },
{ NULL, NULL, NULL }
};
#ifdef HAVE_EVENTFD_SUPPORT
static struct bench epoll_benchmarks[] = {
{ "wait", "Benchmark epoll concurrent epoll_waits", bench_epoll_wait },
{ "ctl", "Benchmark epoll concurrent epoll_ctls", bench_epoll_ctl },
{ "all", "Run all futex benchmarks", NULL },
{ NULL, NULL, NULL }
};
#endif // HAVE_EVENTFD_SUPPORT
static struct bench internals_benchmarks[] = {
{ "synthesize", "Benchmark perf event synthesis", bench_synthesize },
{ "kallsyms-parse", "Benchmark kallsyms parsing", bench_kallsyms_parse },
{ "inject-build-id", "Benchmark build-id injection", bench_inject_build_id },
{ "evlist-open-close", "Benchmark evlist open and close", bench_evlist_open_close },
{ "pmu-scan", "Benchmark sysfs PMU info scanning", bench_pmu_scan },
{ NULL, NULL, NULL }
};
static struct bench breakpoint_benchmarks[] = {
{ "thread", "Benchmark thread start/finish with breakpoints", bench_breakpoint_thread},
{ "enable", "Benchmark breakpoint enable/disable", bench_breakpoint_enable},
{ "all", "Run all breakpoint benchmarks", NULL},
{ NULL, NULL, NULL },
};
static struct bench uprobe_benchmarks[] = {
{ "baseline", "Baseline libc usleep(1000) call", bench_uprobe_baseline, },
{ "empty", "Attach empty BPF prog to uprobe on usleep, system wide", bench_uprobe_empty, },
{ "trace_printk", "Attach trace_printk BPF prog to uprobe on usleep syswide", bench_uprobe_trace_printk, },
{ NULL, NULL, NULL },
};
struct collection {
const char *name;
const char *summary;
struct bench *benchmarks;
};
static struct collection collections[] = {
{ "sched", "Scheduler and IPC benchmarks", sched_benchmarks },
{ "syscall", "System call benchmarks", syscall_benchmarks },
{ "mem", "Memory access benchmarks", mem_benchmarks },
#ifdef HAVE_LIBNUMA_SUPPORT
{ "numa", "NUMA scheduling and MM benchmarks", numa_benchmarks },
#endif
{"futex", "Futex stressing benchmarks", futex_benchmarks },
#ifdef HAVE_EVENTFD_SUPPORT
{"epoll", "Epoll stressing benchmarks", epoll_benchmarks },
#endif
{ "internals", "Perf-internals benchmarks", internals_benchmarks },
{ "breakpoint", "Breakpoint benchmarks", breakpoint_benchmarks },
{ "uprobe", "uprobe benchmarks", uprobe_benchmarks },
{ "all", "All benchmarks", NULL },
{ NULL, NULL, NULL }
};
/* Iterate over all benchmark collections: */
#define for_each_collection(coll) \
for (coll = collections; coll->name; coll++)
/* Iterate over all benchmarks within a collection: */
#define for_each_bench(coll, bench) \
for (bench = coll->benchmarks; bench && bench->name; bench++)
static void dump_benchmarks(struct collection *coll)
{
struct bench *bench;
printf("\n # List of available benchmarks for collection '%s':\n\n", coll->name);
for_each_bench(coll, bench)
printf("%14s: %s\n", bench->name, bench->summary);
printf("\n");
}
static const char *bench_format_str;
/* Output/formatting style, exported to benchmark modules: */
int bench_format = BENCH_FORMAT_DEFAULT;
unsigned int bench_repeat = 10; /* default number of times to repeat the run */
static const struct option bench_options[] = {
OPT_STRING('f', "format", &bench_format_str, "default|simple", "Specify the output formatting style"),
OPT_UINTEGER('r', "repeat", &bench_repeat, "Specify number of times to repeat the run"),
OPT_END()
};
static const char * const bench_usage[] = {
"perf bench [<common options>] <collection> <benchmark> [<options>]",
NULL
};
static void print_usage(void)
{
struct collection *coll;
int i;
printf("Usage: \n");
for (i = 0; bench_usage[i]; i++)
printf("\t%s\n", bench_usage[i]);
printf("\n");
printf(" # List of all available benchmark collections:\n\n");
for_each_collection(coll)
printf("%14s: %s\n", coll->name, coll->summary);
printf("\n");
}
static int bench_str2int(const char *str)
{
if (!str)
return BENCH_FORMAT_DEFAULT;
if (!strcmp(str, BENCH_FORMAT_DEFAULT_STR))
return BENCH_FORMAT_DEFAULT;
else if (!strcmp(str, BENCH_FORMAT_SIMPLE_STR))
return BENCH_FORMAT_SIMPLE;
return BENCH_FORMAT_UNKNOWN;
}
/*
* Run a specific benchmark but first rename the running task's ->comm[]
* to something meaningful:
*/
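/*
* E.g. "perf bench sched messaging" runs with its comm set to
* "sched-messaging", which is what tools like top(1) then display
* (illustrative).
*/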
static int run_bench(const char *coll_name, const char *bench_name, bench_fn_t fn,
int argc, const char **argv)
{
int size;
char *name;
int ret;
size = strlen(coll_name) + 1 + strlen(bench_name) + 1;
name = zalloc(size);
BUG_ON(!name);
scnprintf(name, size, "%s-%s", coll_name, bench_name);
prctl(PR_SET_NAME, name);
argv[0] = name;
ret = fn(argc, argv);
free(name);
return ret;
}
static void run_collection(struct collection *coll)
{
struct bench *bench;
const char *argv[2];
argv[1] = NULL;
/*
* TODO:
*
* Preparing preset parameters for
* embedded, ordinary PC, HPC, etc...
* would be helpful.
*/
for_each_bench(coll, bench) {
if (!bench->fn)
break;
printf("# Running %s/%s benchmark...\n", coll->name, bench->name);
argv[1] = bench->name;
run_bench(coll->name, bench->name, bench->fn, 1, argv);
printf("\n");
}
}
static void run_all_collections(void)
{
struct collection *coll;
for_each_collection(coll)
run_collection(coll);
}
int cmd_bench(int argc, const char **argv)
{
struct collection *coll;
int ret = 0;
/* Unbuffered output */
setvbuf(stdout, NULL, _IONBF, 0);
setlocale(LC_ALL, "");
if (argc < 2) {
/* No collection specified. */
print_usage();
goto end;
}
argc = parse_options(argc, argv, bench_options, bench_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
bench_format = bench_str2int(bench_format_str);
if (bench_format == BENCH_FORMAT_UNKNOWN) {
printf("Unknown format descriptor: '%s'\n", bench_format_str);
ret = 1;
goto end;
}
if (bench_repeat == 0) {
printf("Invalid repeat option: Must specify a positive value\n");
ret = 1;
goto end;
}
if (argc < 1) {
print_usage();
goto end;
}
if (!strcmp(argv[0], "all")) {
run_all_collections();
goto end;
}
for_each_collection(coll) {
struct bench *bench;
if (strcmp(coll->name, argv[0]))
continue;
if (argc < 2) {
/* No bench specified. */
dump_benchmarks(coll);
goto end;
}
if (!strcmp(argv[1], "all")) {
run_collection(coll);
goto end;
}
for_each_bench(coll, bench) {
if (strcmp(bench->name, argv[1]))
continue;
if (bench_format == BENCH_FORMAT_DEFAULT)
printf("# Running '%s/%s' benchmark:\n", coll->name, bench->name);
ret = run_bench(coll->name, bench->name, bench->fn, argc-1, argv+1);
goto end;
}
if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
dump_benchmarks(coll);
goto end;
}
printf("Unknown benchmark: '%s' for collection '%s'\n", argv[1], argv[0]);
ret = 1;
goto end;
}
printf("Unknown collection: '%s'\n", argv[0]);
ret = 1;
end:
return ret;
}
| linux-master | tools/perf/builtin-bench.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-buildid-cache.c
*
* Builtin buildid-cache command: Manages build-id cache
*
* Copyright (C) 2010, Red Hat Inc.
* Copyright (C) 2010, Arnaldo Carvalho de Melo <[email protected]>
*/
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <dirent.h>
#include <errno.h>
#include <unistd.h>
#include "builtin.h"
#include "namespaces.h"
#include "util/debug.h"
#include "util/header.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/build-id.h"
#include "util/session.h"
#include "util/dso.h"
#include "util/symbol.h"
#include "util/time-utils.h"
#include "util/util.h"
#include "util/probe-file.h"
#include "util/config.h"
#include <linux/string.h>
#include <linux/err.h>
static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid)
{
char root_dir[PATH_MAX];
char *p;
strlcpy(root_dir, proc_dir, sizeof(root_dir));
p = strrchr(root_dir, '/');
if (!p)
return -1;
*p = '\0';
return sysfs__sprintf_build_id(root_dir, sbuildid);
}
static int build_id_cache__kcore_dir(char *dir, size_t sz)
{
return fetch_current_timestamp(dir, sz);
}
static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
{
char from[PATH_MAX];
char to[PATH_MAX];
const char *name;
u64 addr1 = 0, addr2 = 0;
int i, err = -1;
scnprintf(from, sizeof(from), "%s/kallsyms", from_dir);
scnprintf(to, sizeof(to), "%s/kallsyms", to_dir);
for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
err = kallsyms__get_function_start(from, name, &addr1);
if (!err)
break;
}
if (err)
return false;
if (kallsyms__get_function_start(to, name, &addr2))
return false;
return addr1 == addr2;
}
static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
size_t to_dir_sz)
{
char from[PATH_MAX];
char to[PATH_MAX];
char to_subdir[PATH_MAX];
struct dirent *dent;
int ret = -1;
DIR *d;
d = opendir(to_dir);
if (!d)
return -1;
scnprintf(from, sizeof(from), "%s/modules", from_dir);
while (1) {
dent = readdir(d);
if (!dent)
break;
if (dent->d_type != DT_DIR)
continue;
scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
dent->d_name);
scnprintf(to_subdir, sizeof(to_subdir), "%s/%s",
to_dir, dent->d_name);
if (!compare_proc_modules(from, to) &&
same_kallsyms_reloc(from_dir, to_subdir)) {
strlcpy(to_dir, to_subdir, to_dir_sz);
ret = 0;
break;
}
}
closedir(d);
return ret;
}
static int build_id_cache__add_kcore(const char *filename, bool force)
{
char dir[32], sbuildid[SBUILD_ID_SIZE];
char from_dir[PATH_MAX], to_dir[PATH_MAX];
char *p;
strlcpy(from_dir, filename, sizeof(from_dir));
p = strrchr(from_dir, '/');
if (!p || strcmp(p + 1, "kcore"))
return -1;
*p = '\0';
if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0)
return -1;
scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s",
buildid_dir, DSO__NAME_KCORE, sbuildid);
if (!force &&
!build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
pr_debug("same kcore found in %s\n", to_dir);
return 0;
}
if (build_id_cache__kcore_dir(dir, sizeof(dir)))
return -1;
scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s/%s",
buildid_dir, DSO__NAME_KCORE, sbuildid, dir);
if (mkdir_p(to_dir, 0755))
return -1;
if (kcore_copy(from_dir, to_dir)) {
/* Remove YYYYmmddHHMMSShh directory */
if (!rmdir(to_dir)) {
p = strrchr(to_dir, '/');
if (p)
*p = '\0';
/* Try to remove buildid directory */
if (!rmdir(to_dir)) {
p = strrchr(to_dir, '/');
if (p)
*p = '\0';
/* Try to remove [kernel.kcore] directory */
rmdir(to_dir);
}
}
return -1;
}
pr_debug("kcore added to build-id cache directory %s\n", to_dir);
return 0;
}
static int build_id_cache__add_file(const char *filename, struct nsinfo *nsi)
{
char sbuild_id[SBUILD_ID_SIZE];
struct build_id bid;
int err;
struct nscookie nsc;
nsinfo__mountns_enter(nsi, &nsc);
err = filename__read_build_id(filename, &bid);
nsinfo__mountns_exit(&nsc);
if (err < 0) {
pr_debug("Couldn't read a build-id in %s\n", filename);
return -1;
}
build_id__sprintf(&bid, sbuild_id);
err = build_id_cache__add_s(sbuild_id, filename, nsi,
false, false);
pr_debug("Adding %s %s: %s\n", sbuild_id, filename,
err ? "FAIL" : "Ok");
return err;
}
static int build_id_cache__remove_file(const char *filename, struct nsinfo *nsi)
{
char sbuild_id[SBUILD_ID_SIZE];
struct build_id bid;
struct nscookie nsc;
int err;
nsinfo__mountns_enter(nsi, &nsc);
err = filename__read_build_id(filename, &bid);
nsinfo__mountns_exit(&nsc);
if (err < 0) {
pr_debug("Couldn't read a build-id in %s\n", filename);
return -1;
}
build_id__sprintf(&bid, sbuild_id);
err = build_id_cache__remove_s(sbuild_id);
pr_debug("Removing %s %s: %s\n", sbuild_id, filename,
err ? "FAIL" : "Ok");
return err;
}
static int build_id_cache__purge_path(const char *pathname, struct nsinfo *nsi)
{
struct strlist *list;
struct str_node *pos;
int err;
err = build_id_cache__list_build_ids(pathname, nsi, &list);
if (err)
goto out;
strlist__for_each_entry(pos, list) {
err = build_id_cache__remove_s(pos->s);
pr_debug("Removing %s %s: %s\n", pos->s, pathname,
err ? "FAIL" : "Ok");
if (err)
break;
}
strlist__delete(list);
out:
pr_debug("Purging %s: %s\n", pathname, err ? "FAIL" : "Ok");
return err;
}
static int build_id_cache__purge_all(void)
{
struct strlist *list;
struct str_node *pos;
int err = 0;
char *buf;
list = build_id_cache__list_all(false);
if (!list) {
pr_debug("Failed to get buildids: -%d\n", errno);
return -EINVAL;
}
strlist__for_each_entry(pos, list) {
buf = build_id_cache__origname(pos->s);
err = build_id_cache__remove_s(pos->s);
pr_debug("Removing %s (%s): %s\n", buf, pos->s,
err ? "FAIL" : "Ok");
free(buf);
if (err)
break;
}
strlist__delete(list);
pr_debug("Purged all: %s\n", err ? "FAIL" : "Ok");
return err;
}
static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
{
char filename[PATH_MAX];
struct build_id bid;
if (dso__build_id_filename(dso, filename, sizeof(filename), false) &&
filename__read_build_id(filename, &bid) == -1) {
if (errno == ENOENT)
return false;
pr_warning("Problems with %s file, consider removing it from the cache\n",
filename);
} else if (memcmp(dso->bid.data, bid.data, bid.size)) {
pr_warning("Problems with %s file, consider removing it from the cache\n",
filename);
}
return true;
}
static int build_id_cache__fprintf_missing(struct perf_session *session, FILE *fp)
{
perf_session__fprintf_dsos_buildid(session, fp, dso__missing_buildid_cache, 0);
return 0;
}
static int build_id_cache__update_file(const char *filename, struct nsinfo *nsi)
{
char sbuild_id[SBUILD_ID_SIZE];
struct build_id bid;
struct nscookie nsc;
int err;
nsinfo__mountns_enter(nsi, &nsc);
err = filename__read_build_id(filename, &bid);
nsinfo__mountns_exit(&nsc);
if (err < 0) {
pr_debug("Couldn't read a build-id in %s\n", filename);
return -1;
}
err = 0;
build_id__sprintf(&bid, sbuild_id);
if (build_id_cache__cached(sbuild_id))
err = build_id_cache__remove_s(sbuild_id);
if (!err)
err = build_id_cache__add_s(sbuild_id, filename, nsi, false,
false);
pr_debug("Updating %s %s: %s\n", sbuild_id, filename,
err ? "FAIL" : "Ok");
return err;
}
static int build_id_cache__show_all(void)
{
struct strlist *bidlist;
struct str_node *nd;
char *buf;
bidlist = build_id_cache__list_all(true);
if (!bidlist) {
pr_debug("Failed to get buildids: -%d\n", errno);
return -1;
}
strlist__for_each_entry(nd, bidlist) {
buf = build_id_cache__origname(nd->s);
fprintf(stdout, "%s %s\n", nd->s, buf);
free(buf);
}
strlist__delete(bidlist);
return 0;
}
static int perf_buildid_cache_config(const char *var, const char *value, void *cb)
{
struct perf_debuginfod *di = cb;
if (!strcmp(var, "buildid-cache.debuginfod")) {
di->urls = strdup(value);
if (!di->urls)
return -ENOMEM;
di->set = true;
}
return 0;
}
int cmd_buildid_cache(int argc, const char **argv)
{
struct strlist *list;
struct str_node *pos;
int ret, ns_id = -1;
bool force = false;
bool list_files = false;
bool opts_flag = false;
bool purge_all = false;
char const *add_name_list_str = NULL,
*remove_name_list_str = NULL,
*purge_name_list_str = NULL,
*missing_filename = NULL,
*update_name_list_str = NULL,
*kcore_filename = NULL;
struct perf_debuginfod debuginfod = { };
char sbuf[STRERR_BUFSIZE];
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
struct perf_session *session = NULL;
struct nsinfo *nsi = NULL;
const struct option buildid_cache_options[] = {
OPT_STRING('a', "add", &add_name_list_str,
"file list", "file(s) to add"),
OPT_STRING('k', "kcore", &kcore_filename,
"file", "kcore file to add"),
OPT_STRING('r', "remove", &remove_name_list_str, "file list",
"file(s) to remove"),
OPT_STRING('p', "purge", &purge_name_list_str, "file list",
"file(s) to remove (remove old caches too)"),
OPT_BOOLEAN('P', "purge-all", &purge_all, "purge all cached files"),
OPT_BOOLEAN('l', "list", &list_files, "list all cached files"),
OPT_STRING('M', "missing", &missing_filename, "file",
"to find missing build ids in the cache"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_STRING('u', "update", &update_name_list_str, "file list",
"file(s) to update"),
OPT_STRING_OPTARG_SET(0, "debuginfod", &debuginfod.urls,
&debuginfod.set, "debuginfod urls",
"Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls",
"system"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_INTEGER(0, "target-ns", &ns_id, "target pid for namespace context"),
OPT_END()
};
const char * const buildid_cache_usage[] = {
"perf buildid-cache [<options>]",
NULL
};
ret = perf_config(perf_buildid_cache_config, &debuginfod);
if (ret)
return ret;
argc = parse_options(argc, argv, buildid_cache_options,
buildid_cache_usage, 0);
opts_flag = add_name_list_str || kcore_filename ||
remove_name_list_str || purge_name_list_str ||
missing_filename || update_name_list_str ||
purge_all;
if (argc || !(list_files || opts_flag))
usage_with_options(buildid_cache_usage, buildid_cache_options);
perf_debuginfod_setup(&debuginfod);
/* -l is exclusive. It cannot be used with other options. */
if (list_files && opts_flag) {
usage_with_options_msg(buildid_cache_usage,
buildid_cache_options, "-l is exclusive.\n");
}
if (ns_id > 0)
nsi = nsinfo__new(ns_id);
if (missing_filename) {
data.path = missing_filename;
data.force = force;
session = perf_session__new(&data, NULL);
if (IS_ERR(session))
return PTR_ERR(session);
}
if (symbol__init(session ? &session->header.env : NULL) < 0) {
ret = -1;
goto out;
}
setup_pager();
if (list_files) {
ret = build_id_cache__show_all();
goto out;
}
if (add_name_list_str) {
list = strlist__new(add_name_list_str, NULL);
if (list) {
strlist__for_each_entry(pos, list)
if (build_id_cache__add_file(pos->s, nsi)) {
if (errno == EEXIST) {
pr_debug("%s already in the cache\n",
pos->s);
continue;
}
pr_warning("Couldn't add %s: %s\n",
pos->s, str_error_r(errno, sbuf, sizeof(sbuf)));
}
strlist__delete(list);
}
}
if (remove_name_list_str) {
list = strlist__new(remove_name_list_str, NULL);
if (list) {
strlist__for_each_entry(pos, list)
if (build_id_cache__remove_file(pos->s, nsi)) {
if (errno == ENOENT) {
pr_debug("%s wasn't in the cache\n",
pos->s);
continue;
}
pr_warning("Couldn't remove %s: %s\n",
pos->s, str_error_r(errno, sbuf, sizeof(sbuf)));
}
strlist__delete(list);
}
}
if (purge_name_list_str) {
list = strlist__new(purge_name_list_str, NULL);
if (list) {
strlist__for_each_entry(pos, list)
if (build_id_cache__purge_path(pos->s, nsi)) {
if (errno == ENOENT) {
pr_debug("%s wasn't in the cache\n",
pos->s);
continue;
}
pr_warning("Couldn't remove %s: %s\n",
pos->s, str_error_r(errno, sbuf, sizeof(sbuf)));
}
strlist__delete(list);
}
}
if (purge_all) {
if (build_id_cache__purge_all()) {
pr_warning("Couldn't remove some caches. Error: %s.\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
}
}
if (missing_filename)
ret = build_id_cache__fprintf_missing(session, stdout);
if (update_name_list_str) {
list = strlist__new(update_name_list_str, NULL);
if (list) {
strlist__for_each_entry(pos, list)
if (build_id_cache__update_file(pos->s, nsi)) {
if (errno == ENOENT) {
pr_debug("%s wasn't in the cache\n",
pos->s);
continue;
}
pr_warning("Couldn't update %s: %s\n",
pos->s, str_error_r(errno, sbuf, sizeof(sbuf)));
}
strlist__delete(list);
}
}
if (kcore_filename && build_id_cache__add_kcore(kcore_filename, force))
pr_warning("Couldn't add %s\n", kcore_filename);
out:
perf_session__delete(session);
nsinfo__zput(nsi);
return ret;
}
| linux-master | tools/perf/builtin-buildid-cache.c |
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "util/counts.h"
#include "util/debug.h"
#include "util/dso.h"
#include <subcmd/exec-cmd.h>
#include "util/header.h"
#include <subcmd/parse-options.h>
#include "util/perf_regs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/map.h"
#include "util/srcline.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
#include "util/env.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/evswitch.h"
#include "util/sort.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/color.h"
#include "util/string2.h"
#include "util/thread-stack.h"
#include "util/time-utils.h"
#include "util/path.h"
#include "util/event.h"
#include "ui/ui.h"
#include "print_binary.h"
#include "archinsn.h"
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <sys/utsname.h>
#include "asm/bug.h"
#include "util/mem-events.h"
#include "util/dump-insn.h"
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <subcmd/pager.h>
#include <perf/evlist.h>
#include <linux/err.h>
#include "util/dlfilter.h"
#include "util/record.h"
#include "util/util.h"
#include "util/cgroup.h"
#include "perf.h"
#include <linux/ctype.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
static char const *script_name;
static char const *generate_script_lang;
static bool reltime;
static bool deltatime;
static u64 initial_time;
static u64 previous_time;
static bool debug_mode;
static u64 last_timestamp;
static u64 nr_unordered;
static bool no_callchain;
static bool latency_format;
static bool system_wide;
static bool print_flags;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
static struct perf_stat_config stat_config;
static int max_blocks;
static bool native_arch;
static struct dlfilter *dlfilter;
static int dlargc;
static char **dlargv;
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
enum perf_output_field {
PERF_OUTPUT_COMM = 1ULL << 0,
PERF_OUTPUT_TID = 1ULL << 1,
PERF_OUTPUT_PID = 1ULL << 2,
PERF_OUTPUT_TIME = 1ULL << 3,
PERF_OUTPUT_CPU = 1ULL << 4,
PERF_OUTPUT_EVNAME = 1ULL << 5,
PERF_OUTPUT_TRACE = 1ULL << 6,
PERF_OUTPUT_IP = 1ULL << 7,
PERF_OUTPUT_SYM = 1ULL << 8,
PERF_OUTPUT_DSO = 1ULL << 9,
PERF_OUTPUT_ADDR = 1ULL << 10,
PERF_OUTPUT_SYMOFFSET = 1ULL << 11,
PERF_OUTPUT_SRCLINE = 1ULL << 12,
PERF_OUTPUT_PERIOD = 1ULL << 13,
PERF_OUTPUT_IREGS = 1ULL << 14,
PERF_OUTPUT_BRSTACK = 1ULL << 15,
PERF_OUTPUT_BRSTACKSYM = 1ULL << 16,
PERF_OUTPUT_DATA_SRC = 1ULL << 17,
PERF_OUTPUT_WEIGHT = 1ULL << 18,
PERF_OUTPUT_BPF_OUTPUT = 1ULL << 19,
PERF_OUTPUT_CALLINDENT = 1ULL << 20,
PERF_OUTPUT_INSN = 1ULL << 21,
PERF_OUTPUT_INSNLEN = 1ULL << 22,
PERF_OUTPUT_BRSTACKINSN = 1ULL << 23,
PERF_OUTPUT_BRSTACKOFF = 1ULL << 24,
PERF_OUTPUT_SYNTH = 1ULL << 25,
PERF_OUTPUT_PHYS_ADDR = 1ULL << 26,
PERF_OUTPUT_UREGS = 1ULL << 27,
PERF_OUTPUT_METRIC = 1ULL << 28,
PERF_OUTPUT_MISC = 1ULL << 29,
PERF_OUTPUT_SRCCODE = 1ULL << 30,
PERF_OUTPUT_IPC = 1ULL << 31,
PERF_OUTPUT_TOD = 1ULL << 32,
PERF_OUTPUT_DATA_PAGE_SIZE = 1ULL << 33,
PERF_OUTPUT_CODE_PAGE_SIZE = 1ULL << 34,
PERF_OUTPUT_INS_LAT = 1ULL << 35,
PERF_OUTPUT_BRSTACKINSNLEN = 1ULL << 36,
PERF_OUTPUT_MACHINE_PID = 1ULL << 37,
PERF_OUTPUT_VCPU = 1ULL << 38,
PERF_OUTPUT_CGROUP = 1ULL << 39,
PERF_OUTPUT_RETIRE_LAT = 1ULL << 40,
PERF_OUTPUT_DSOFF = 1ULL << 41,
};
struct perf_script {
struct perf_tool tool;
struct perf_session *session;
bool show_task_events;
bool show_mmap_events;
bool show_switch_events;
bool show_namespace_events;
bool show_lost_events;
bool show_round_events;
bool show_bpf_events;
bool show_cgroup_events;
bool show_text_poke_events;
bool allocated;
bool per_event_dump;
bool stitch_lbr;
struct evswitch evswitch;
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
int name_width;
const char *time_str;
struct perf_time_interval *ptime_range;
int range_size;
int range_num;
};
struct output_option {
const char *str;
enum perf_output_field field;
} all_output_options[] = {
{.str = "comm", .field = PERF_OUTPUT_COMM},
{.str = "tid", .field = PERF_OUTPUT_TID},
{.str = "pid", .field = PERF_OUTPUT_PID},
{.str = "time", .field = PERF_OUTPUT_TIME},
{.str = "cpu", .field = PERF_OUTPUT_CPU},
{.str = "event", .field = PERF_OUTPUT_EVNAME},
{.str = "trace", .field = PERF_OUTPUT_TRACE},
{.str = "ip", .field = PERF_OUTPUT_IP},
{.str = "sym", .field = PERF_OUTPUT_SYM},
{.str = "dso", .field = PERF_OUTPUT_DSO},
{.str = "dsoff", .field = PERF_OUTPUT_DSOFF},
{.str = "addr", .field = PERF_OUTPUT_ADDR},
{.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET},
{.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
{.str = "period", .field = PERF_OUTPUT_PERIOD},
{.str = "iregs", .field = PERF_OUTPUT_IREGS},
{.str = "uregs", .field = PERF_OUTPUT_UREGS},
{.str = "brstack", .field = PERF_OUTPUT_BRSTACK},
{.str = "brstacksym", .field = PERF_OUTPUT_BRSTACKSYM},
{.str = "data_src", .field = PERF_OUTPUT_DATA_SRC},
{.str = "weight", .field = PERF_OUTPUT_WEIGHT},
{.str = "bpf-output", .field = PERF_OUTPUT_BPF_OUTPUT},
{.str = "callindent", .field = PERF_OUTPUT_CALLINDENT},
{.str = "insn", .field = PERF_OUTPUT_INSN},
{.str = "insnlen", .field = PERF_OUTPUT_INSNLEN},
{.str = "brstackinsn", .field = PERF_OUTPUT_BRSTACKINSN},
{.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
{.str = "synth", .field = PERF_OUTPUT_SYNTH},
{.str = "phys_addr", .field = PERF_OUTPUT_PHYS_ADDR},
{.str = "metric", .field = PERF_OUTPUT_METRIC},
{.str = "misc", .field = PERF_OUTPUT_MISC},
{.str = "srccode", .field = PERF_OUTPUT_SRCCODE},
{.str = "ipc", .field = PERF_OUTPUT_IPC},
{.str = "tod", .field = PERF_OUTPUT_TOD},
{.str = "data_page_size", .field = PERF_OUTPUT_DATA_PAGE_SIZE},
{.str = "code_page_size", .field = PERF_OUTPUT_CODE_PAGE_SIZE},
{.str = "ins_lat", .field = PERF_OUTPUT_INS_LAT},
{.str = "brstackinsnlen", .field = PERF_OUTPUT_BRSTACKINSNLEN},
{.str = "machine_pid", .field = PERF_OUTPUT_MACHINE_PID},
{.str = "vcpu", .field = PERF_OUTPUT_VCPU},
{.str = "cgroup", .field = PERF_OUTPUT_CGROUP},
{.str = "retire_lat", .field = PERF_OUTPUT_RETIRE_LAT},
};
enum {
OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
OUTPUT_TYPE_OTHER,
OUTPUT_TYPE_MAX
};
/* default set to maintain compatibility with current format */
static struct {
bool user_set;
bool wildcard_set;
unsigned int print_ip_opts;
u64 fields;
u64 invalid_fields;
u64 user_set_fields;
u64 user_unset_fields;
} output[OUTPUT_TYPE_MAX] = {
[PERF_TYPE_HARDWARE] = {
.user_set = false,
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
[PERF_TYPE_SOFTWARE] = {
.user_set = false,
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
PERF_OUTPUT_BPF_OUTPUT,
.invalid_fields = PERF_OUTPUT_TRACE,
},
[PERF_TYPE_TRACEPOINT] = {
.user_set = false,
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
},
[PERF_TYPE_HW_CACHE] = {
.user_set = false,
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
[PERF_TYPE_RAW] = {
.user_set = false,
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
PERF_OUTPUT_ADDR | PERF_OUTPUT_DATA_SRC |
PERF_OUTPUT_WEIGHT | PERF_OUTPUT_PHYS_ADDR |
PERF_OUTPUT_DATA_PAGE_SIZE | PERF_OUTPUT_CODE_PAGE_SIZE |
PERF_OUTPUT_INS_LAT | PERF_OUTPUT_RETIRE_LAT,
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
[PERF_TYPE_BREAKPOINT] = {
.user_set = false,
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
[OUTPUT_TYPE_SYNTH] = {
.user_set = false,
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
PERF_OUTPUT_DSO | PERF_OUTPUT_SYNTH,
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
[OUTPUT_TYPE_OTHER] = {
.user_set = false,
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
};
struct evsel_script {
char *filename;
FILE *fp;
u64 samples;
/* For metric output */
u64 val;
int gnum;
};
static inline struct evsel_script *evsel_script(struct evsel *evsel)
{
return (struct evsel_script *)evsel->priv;
}
static struct evsel_script *evsel_script__new(struct evsel *evsel, struct perf_data *data)
{
struct evsel_script *es = zalloc(sizeof(*es));
if (es != NULL) {
if (asprintf(&es->filename, "%s.%s.dump", data->file.path, evsel__name(evsel)) < 0)
goto out_free;
es->fp = fopen(es->filename, "w");
if (es->fp == NULL)
goto out_free_filename;
}
return es;
out_free_filename:
zfree(&es->filename);
out_free:
free(es);
return NULL;
}
static void evsel_script__delete(struct evsel_script *es)
{
zfree(&es->filename);
fclose(es->fp);
es->fp = NULL;
free(es);
}
static int evsel_script__fprintf(struct evsel_script *es, FILE *fp)
{
struct stat st;
fstat(fileno(es->fp), &st);
return fprintf(fp, "[ perf script: Wrote %.3f MB %s (%" PRIu64 " samples) ]\n",
st.st_size / 1024.0 / 1024.0, es->filename, es->samples);
}
static inline int output_type(unsigned int type)
{
switch (type) {
case PERF_TYPE_SYNTH:
return OUTPUT_TYPE_SYNTH;
default:
if (type < PERF_TYPE_MAX)
return type;
}
return OUTPUT_TYPE_OTHER;
}
static bool output_set_by_user(void)
{
int j;
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
if (output[j].user_set)
return true;
}
return false;
}
static const char *output_field2str(enum perf_output_field field)
{
int i, imax = ARRAY_SIZE(all_output_options);
const char *str = "";
for (i = 0; i < imax; ++i) {
if (all_output_options[i].field == field) {
str = all_output_options[i].str;
break;
}
}
return str;
}
#define PRINT_FIELD(x) (output[output_type(attr->type)].fields & PERF_OUTPUT_##x)
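/*
* Policy sketch for the check below: a field the user explicitly
* requested but that the samples cannot supply is a hard error (unless
* allow_user_set, e.g. when an auxtrace header suggests it may be
* synthesized later); a field that is merely in the defaults is
* silently dropped instead.
*/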
static int evsel__do_check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
enum perf_output_field field, bool allow_user_set)
{
struct perf_event_attr *attr = &evsel->core.attr;
int type = output_type(attr->type);
const char *evname;
if (attr->sample_type & sample_type)
return 0;
if (output[type].user_set_fields & field) {
if (allow_user_set)
return 0;
evname = evsel__name(evsel);
pr_err("Samples for '%s' event do not have %s attribute set. "
"Cannot print '%s' field.\n",
evname, sample_msg, output_field2str(field));
return -1;
}
/* user did not ask for it explicitly so remove from the default list */
output[type].fields &= ~field;
evname = evsel__name(evsel);
pr_debug("Samples for '%s' event do not have %s attribute set. "
"Skipping '%s' field.\n",
evname, sample_msg, output_field2str(field));
return 0;
}
static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
enum perf_output_field field)
{
return evsel__do_check_stype(evsel, sample_type, sample_msg, field, false);
}
static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
{
struct perf_event_attr *attr = &evsel->core.attr;
bool allow_user_set;
if (evsel__is_dummy_event(evsel))
return 0;
if (perf_header__has_feat(&session->header, HEADER_STAT))
return 0;
allow_user_set = perf_header__has_feat(&session->header,
HEADER_AUXTRACE);
if (PRINT_FIELD(TRACE) &&
!perf_session__has_traces(session, "record -R"))
return -EINVAL;
if (PRINT_FIELD(IP)) {
if (evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP", PERF_OUTPUT_IP))
return -EINVAL;
}
if (PRINT_FIELD(ADDR) &&
evsel__do_check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR", PERF_OUTPUT_ADDR, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(DATA_SRC) &&
evsel__do_check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(WEIGHT) &&
evsel__do_check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(SYM) &&
!(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
pr_err("Display of symbols requested but neither sample IP nor "
"sample address\navailable. Hence, no addresses to convert "
"to symbols.\n");
return -EINVAL;
}
if (PRINT_FIELD(SYMOFFSET) && !PRINT_FIELD(SYM)) {
pr_err("Display of offsets requested but symbol is not"
"selected.\n");
return -EINVAL;
}
if (PRINT_FIELD(DSO) &&
!(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
pr_err("Display of DSO requested but no address to convert.\n");
return -EINVAL;
}
if ((PRINT_FIELD(SRCLINE) || PRINT_FIELD(SRCCODE)) && !PRINT_FIELD(IP)) {
pr_err("Display of source line number requested but sample IP is not\n"
"selected. Hence, no address to lookup the source line number.\n");
return -EINVAL;
}
if ((PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN)) && !allow_user_set &&
!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY)) {
pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
"Hint: run 'perf record -b ...'\n");
return -EINVAL;
}
if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID", PERF_OUTPUT_TID|PERF_OUTPUT_PID))
return -EINVAL;
if (PRINT_FIELD(TIME) &&
evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME", PERF_OUTPUT_TIME))
return -EINVAL;
if (PRINT_FIELD(CPU) &&
evsel__do_check_stype(evsel, PERF_SAMPLE_CPU, "CPU", PERF_OUTPUT_CPU, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(IREGS) &&
evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(UREGS) &&
evsel__check_stype(evsel, PERF_SAMPLE_REGS_USER, "UREGS", PERF_OUTPUT_UREGS))
return -EINVAL;
if (PRINT_FIELD(PHYS_ADDR) &&
evsel__do_check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", PERF_OUTPUT_PHYS_ADDR, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(DATA_PAGE_SIZE) &&
evsel__check_stype(evsel, PERF_SAMPLE_DATA_PAGE_SIZE, "DATA_PAGE_SIZE", PERF_OUTPUT_DATA_PAGE_SIZE))
return -EINVAL;
if (PRINT_FIELD(CODE_PAGE_SIZE) &&
evsel__check_stype(evsel, PERF_SAMPLE_CODE_PAGE_SIZE, "CODE_PAGE_SIZE", PERF_OUTPUT_CODE_PAGE_SIZE))
return -EINVAL;
if (PRINT_FIELD(INS_LAT) &&
evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_STRUCT, "WEIGHT_STRUCT", PERF_OUTPUT_INS_LAT))
return -EINVAL;
if (PRINT_FIELD(CGROUP) &&
evsel__check_stype(evsel, PERF_SAMPLE_CGROUP, "CGROUP", PERF_OUTPUT_CGROUP)) {
pr_err("Hint: run 'perf record --all-cgroups ...'\n");
return -EINVAL;
}
if (PRINT_FIELD(RETIRE_LAT) &&
evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_STRUCT, "WEIGHT_STRUCT", PERF_OUTPUT_RETIRE_LAT))
return -EINVAL;
return 0;
}
static void set_print_ip_opts(struct perf_event_attr *attr)
{
unsigned int type = output_type(attr->type);
output[type].print_ip_opts = 0;
if (PRINT_FIELD(IP))
output[type].print_ip_opts |= EVSEL__PRINT_IP;
if (PRINT_FIELD(SYM))
output[type].print_ip_opts |= EVSEL__PRINT_SYM;
if (PRINT_FIELD(DSO))
output[type].print_ip_opts |= EVSEL__PRINT_DSO;
if (PRINT_FIELD(DSOFF))
output[type].print_ip_opts |= EVSEL__PRINT_DSOFF;
if (PRINT_FIELD(SYMOFFSET))
output[type].print_ip_opts |= EVSEL__PRINT_SYMOFFSET;
if (PRINT_FIELD(SRCLINE))
output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
}
static struct evsel *find_first_output_type(struct evlist *evlist,
unsigned int type)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_dummy_event(evsel))
continue;
if (output_type(evsel->core.attr.type) == (int)type)
return evsel;
}
return NULL;
}
/*
* verify that all user-requested events exist and that the samples
* have the expected data
*/
static int perf_session__check_output_opt(struct perf_session *session)
{
bool tod = false;
unsigned int j;
struct evsel *evsel;
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
evsel = find_first_output_type(session->evlist, j);
/*
* even if fields is set to 0 (ie., show nothing) event must
* exist if user explicitly includes it on the command line
*/
if (!evsel && output[j].user_set && !output[j].wildcard_set &&
j != OUTPUT_TYPE_SYNTH) {
pr_err("%s events do not exist. "
"Remove corresponding -F option to proceed.\n",
event_type(j));
return -1;
}
if (evsel && output[j].fields &&
evsel__check_attr(evsel, session))
return -1;
if (evsel == NULL)
continue;
/* 'dsoff' implies the 'dso' field */
if (output[j].fields & PERF_OUTPUT_DSOFF)
output[j].fields |= PERF_OUTPUT_DSO;
set_print_ip_opts(&evsel->core.attr);
tod |= output[j].fields & PERF_OUTPUT_TOD;
}
if (!no_callchain) {
bool use_callchain = false;
bool not_pipe = false;
evlist__for_each_entry(session->evlist, evsel) {
not_pipe = true;
if (evsel__has_callchain(evsel)) {
use_callchain = true;
break;
}
}
if (not_pipe && !use_callchain)
symbol_conf.use_callchain = false;
}
/*
* set default for tracepoints to print symbols only
* if callchains are present
*/
if (symbol_conf.use_callchain &&
!output[PERF_TYPE_TRACEPOINT].user_set) {
j = PERF_TYPE_TRACEPOINT;
evlist__for_each_entry(session->evlist, evsel) {
if (evsel->core.attr.type != j)
continue;
if (evsel__has_callchain(evsel)) {
output[j].fields |= PERF_OUTPUT_IP;
output[j].fields |= PERF_OUTPUT_SYM;
output[j].fields |= PERF_OUTPUT_SYMOFFSET;
output[j].fields |= PERF_OUTPUT_DSO;
set_print_ip_opts(&evsel->core.attr);
goto out;
}
}
}
if (tod && !session->header.env.clock.enabled) {
pr_err("Can't provide 'tod' time, missing clock data. "
"Please record with -k/--clockid option.\n");
return -1;
}
out:
return 0;
}
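/*
* Note on the loop below: sampled register values are stored compactly
* in mask bit order, so 'i' indexes the dense regs->regs[] array while
* 'r' is the architectural register number used only for naming.
*/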
static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask, const char *arch,
FILE *fp)
{
unsigned i = 0, r;
int printed = 0;
if (!regs || !regs->regs)
return 0;
printed += fprintf(fp, " ABI:%" PRIu64 " ", regs->abi);
for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs->regs[i++];
printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r, arch), val);
}
return printed;
}
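/*
 * Added commentary (not from the original source): the register values
 * in regs->regs[] are stored densely in mask bit order. E.g. with an
 * illustrative mask of 0x30 (bits 4 and 5 set), regs->regs[0] holds
 * register 4 and regs->regs[1] holds register 5, which is why 'i'
 * advances once per set bit while 'r' names the register.
 */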
#define DEFAULT_TOD_FMT "%F %H:%M:%S"
static char*
tod_scnprintf(struct perf_script *script, char *buf, int buflen,
u64 timestamp)
{
u64 tod_ns, clockid_ns;
struct perf_env *env;
unsigned long nsec;
struct tm ltime;
char date[64];
time_t sec;
buf[0] = '\0';
if (buflen < 64 || !script)
return buf;
env = &script->session->header.env;
if (!env->clock.enabled) {
scnprintf(buf, buflen, "disabled");
return buf;
}
clockid_ns = env->clock.clockid_ns;
tod_ns = env->clock.tod_ns;
if (timestamp > clockid_ns)
tod_ns += timestamp - clockid_ns;
else
tod_ns -= clockid_ns - timestamp;
sec = (time_t) (tod_ns / NSEC_PER_SEC);
nsec = tod_ns - sec * NSEC_PER_SEC;
if (localtime_r(&sec, <ime) == NULL) {
scnprintf(buf, buflen, "failed");
} else {
strftime(date, sizeof(date), DEFAULT_TOD_FMT, <ime);
if (symbol_conf.nanosecs) {
snprintf(buf, buflen, "%s.%09lu", date, nsec);
} else {
snprintf(buf, buflen, "%s.%06lu",
date, nsec / NSEC_PER_USEC);
}
}
return buf;
}
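/*
 * Worked example with illustrative values (added commentary): with
 * clockid_ns = 1000 and tod_ns = 1600000000000000000, a sample
 * timestamp of 2000 maps to tod_ns + (2000 - 1000) =
 * 1600000000000001000, i.e. sec = 1600000000 and nsec = 1000, printed
 * as "<date> <time>.000001" in microsecond mode and
 * "<date> <time>.000001000" with nanoseconds enabled.
 */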
static int perf_sample__fprintf_iregs(struct perf_sample *sample,
struct perf_event_attr *attr, const char *arch, FILE *fp)
{
return perf_sample__fprintf_regs(&sample->intr_regs,
attr->sample_regs_intr, arch, fp);
}
static int perf_sample__fprintf_uregs(struct perf_sample *sample,
struct perf_event_attr *attr, const char *arch, FILE *fp)
{
return perf_sample__fprintf_regs(&sample->user_regs,
attr->sample_regs_user, arch, fp);
}
static int perf_sample__fprintf_start(struct perf_script *script,
struct perf_sample *sample,
struct thread *thread,
struct evsel *evsel,
u32 type, FILE *fp)
{
struct perf_event_attr *attr = &evsel->core.attr;
unsigned long secs;
unsigned long long nsecs;
int printed = 0;
char tstr[128];
if (PRINT_FIELD(MACHINE_PID) && sample->machine_pid)
printed += fprintf(fp, "VM:%5d ", sample->machine_pid);
/* Print VCPU only for guest events i.e. with machine_pid */
if (PRINT_FIELD(VCPU) && sample->machine_pid)
printed += fprintf(fp, "VCPU:%03d ", sample->vcpu);
if (PRINT_FIELD(COMM)) {
const char *comm = thread ? thread__comm_str(thread) : ":-1";
if (latency_format)
printed += fprintf(fp, "%8.8s ", comm);
else if (PRINT_FIELD(IP) && evsel__has_callchain(evsel) && symbol_conf.use_callchain)
printed += fprintf(fp, "%s ", comm);
else
printed += fprintf(fp, "%16s ", comm);
}
if (PRINT_FIELD(PID) && PRINT_FIELD(TID))
printed += fprintf(fp, "%7d/%-7d ", sample->pid, sample->tid);
else if (PRINT_FIELD(PID))
printed += fprintf(fp, "%7d ", sample->pid);
else if (PRINT_FIELD(TID))
printed += fprintf(fp, "%7d ", sample->tid);
if (PRINT_FIELD(CPU)) {
if (latency_format)
printed += fprintf(fp, "%3d ", sample->cpu);
else
printed += fprintf(fp, "[%03d] ", sample->cpu);
}
if (PRINT_FIELD(MISC)) {
int ret = 0;
#define has(m) \
(sample->misc & PERF_RECORD_MISC_##m) == PERF_RECORD_MISC_##m
if (has(KERNEL))
ret += fprintf(fp, "K");
if (has(USER))
ret += fprintf(fp, "U");
if (has(HYPERVISOR))
ret += fprintf(fp, "H");
if (has(GUEST_KERNEL))
ret += fprintf(fp, "G");
if (has(GUEST_USER))
ret += fprintf(fp, "g");
switch (type) {
case PERF_RECORD_MMAP:
case PERF_RECORD_MMAP2:
if (has(MMAP_DATA))
ret += fprintf(fp, "M");
break;
case PERF_RECORD_COMM:
if (has(COMM_EXEC))
ret += fprintf(fp, "E");
break;
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
if (has(SWITCH_OUT)) {
ret += fprintf(fp, "S");
if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT)
ret += fprintf(fp, "p");
}
break;
default:
break;
}
#undef has
ret += fprintf(fp, "%*s", 6 - ret, " ");
printed += ret;
}
if (PRINT_FIELD(TOD)) {
tod_scnprintf(script, tstr, sizeof(tstr), sample->time);
printed += fprintf(fp, "%s ", tstr);
}
if (PRINT_FIELD(TIME)) {
u64 t = sample->time;
if (reltime) {
if (!initial_time)
initial_time = sample->time;
t = sample->time - initial_time;
} else if (deltatime) {
if (previous_time)
t = sample->time - previous_time;
else {
t = 0;
}
previous_time = sample->time;
}
nsecs = t;
secs = nsecs / NSEC_PER_SEC;
nsecs -= secs * NSEC_PER_SEC;
if (symbol_conf.nanosecs)
printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
else {
char sample_time[32];
timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
printed += fprintf(fp, "%12s: ", sample_time);
}
}
return printed;
}
static inline char
mispred_str(struct branch_entry *br)
{
if (!(br->flags.mispred || br->flags.predicted))
return '-';
return br->flags.predicted ? 'P' : 'M';
}
static int print_bstack_flags(FILE *fp, struct branch_entry *br)
{
return fprintf(fp, "/%c/%c/%c/%d/%s/%s ",
mispred_str(br),
br->flags.in_tx ? 'X' : '-',
br->flags.abort ? 'A' : '-',
br->flags.cycles,
get_branch_type(br),
br->flags.spec ? branch_spec_desc(br->flags.spec) : "-");
}
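/*
 * Added commentary (not from the original source): branch flags are
 * printed as "/mispred/in_tx/abort/cycles/type/spec ", e.g. a
 * correctly predicted branch outside a transaction that took 2 cycles
 * comes out as "/P/-/-/2/<type>/- ", with <type> supplied by
 * get_branch_type() and the speculation field by branch_spec_desc().
 */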
static int perf_sample__fprintf_brstack(struct perf_sample *sample,
struct thread *thread,
struct perf_event_attr *attr, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
u64 i, from, to;
int printed = 0;
if (!(br && br->nr))
return 0;
for (i = 0; i < br->nr; i++) {
from = entries[i].from;
to = entries[i].to;
printed += fprintf(fp, " 0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) {
struct addr_location alf, alt;
addr_location__init(&alf);
addr_location__init(&alt);
thread__find_map_fb(thread, sample->cpumode, from, &alf);
thread__find_map_fb(thread, sample->cpumode, to, &alt);
printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
printed += fprintf(fp, "/0x%"PRIx64, to);
printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
addr_location__exit(&alt);
addr_location__exit(&alf);
} else
printed += fprintf(fp, "/0x%"PRIx64, to);
printed += print_bstack_flags(fp, entries + i);
}
return printed;
}
static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
struct thread *thread,
struct perf_event_attr *attr, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
u64 i, from, to;
int printed = 0;
if (!(br && br->nr))
return 0;
for (i = 0; i < br->nr; i++) {
struct addr_location alf, alt;
addr_location__init(&alf);
addr_location__init(&alt);
from = entries[i].from;
to = entries[i].to;
thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
if (PRINT_FIELD(DSO))
printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
printed += fprintf(fp, "%c", '/');
printed += symbol__fprintf_symname_offs(alt.sym, &alt, fp);
if (PRINT_FIELD(DSO))
printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
printed += print_bstack_flags(fp, entries + i);
addr_location__exit(&alt);
addr_location__exit(&alf);
}
return printed;
}
static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
struct thread *thread,
struct perf_event_attr *attr, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
u64 i, from, to;
int printed = 0;
if (!(br && br->nr))
return 0;
for (i = 0; i < br->nr; i++) {
struct addr_location alf, alt;
addr_location__init(&alf);
addr_location__init(&alt);
from = entries[i].from;
to = entries[i].to;
if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
!map__dso(alf.map)->adjust_symbols)
from = map__dso_map_ip(alf.map, from);
if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
!map__dso(alt.map)->adjust_symbols)
to = map__dso_map_ip(alt.map, to);
printed += fprintf(fp, " 0x%"PRIx64, from);
if (PRINT_FIELD(DSO))
printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
printed += fprintf(fp, "/0x%"PRIx64, to);
if (PRINT_FIELD(DSO))
printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
printed += print_bstack_flags(fp, entries + i);
addr_location__exit(&alt);
addr_location__exit(&alf);
}
return printed;
}
#define MAXBB 16384UL
static int grab_bb(u8 *buffer, u64 start, u64 end,
struct machine *machine, struct thread *thread,
bool *is64bit, u8 *cpumode, bool last)
{
long offset, len;
struct addr_location al;
bool kernel;
struct dso *dso;
int ret = 0;
if (!start || !end)
return 0;
kernel = machine__kernel_ip(machine, start);
if (kernel)
*cpumode = PERF_RECORD_MISC_KERNEL;
else
*cpumode = PERF_RECORD_MISC_USER;
/*
* Block overlaps between kernel and user.
* This can happen due to ring filtering: on Intel CPUs the
* entry into the kernel is filtered, but the exit is not.
* Let the caller patch it up.
*/
if (kernel != machine__kernel_ip(machine, end)) {
pr_debug("\tblock %" PRIx64 "-%" PRIx64 " transfers between kernel and user\n", start, end);
return -ENXIO;
}
if (end - start > MAXBB - MAXINSN) {
if (last)
pr_debug("\tbrstack does not reach to final jump (%" PRIx64 "-%" PRIx64 ")\n", start, end);
else
pr_debug("\tblock %" PRIx64 "-%" PRIx64 " (%" PRIu64 ") too long to dump\n", start, end, end - start);
return 0;
}
addr_location__init(&al);
if (!thread__find_map(thread, *cpumode, start, &al) || (dso = map__dso(al.map)) == NULL) {
pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
goto out;
}
if (dso->data.status == DSO_DATA_STATUS_ERROR) {
pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
goto out;
}
/* Load maps to ensure dso->is_64_bit has been updated */
map__load(al.map);
offset = map__map_ip(al.map, start);
len = dso__data_read_offset(dso, machine, offset, (u8 *)buffer,
end - start + MAXINSN);
*is64bit = dso->is_64_bit;
if (len <= 0)
pr_debug("\tcannot fetch code for block at %" PRIx64 "-%" PRIx64 "\n",
start, end);
ret = len;
out:
addr_location__exit(&al);
return ret;
}
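/*
 * Added commentary (not from the original source): the size check above
 * rejects blocks larger than MAXBB - MAXINSN so that the read of
 * end - start + MAXINSN bytes still fits the caller's MAXBB-sized
 * buffer; the extra MAXINSN bytes past 'end' let the disassembler
 * decode the final instruction even if it straddles 'end'.
 */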
static int map__fprintf_srccode(struct map *map, u64 addr, FILE *fp, struct srccode_state *state)
{
char *srcfile;
int ret = 0;
unsigned line;
int len;
char *srccode;
struct dso *dso;
if (!map || (dso = map__dso(map)) == NULL)
return 0;
srcfile = get_srcline_split(dso,
map__rip_2objdump(map, addr),
&line);
if (!srcfile)
return 0;
/* Avoid redundant printing */
if (state &&
state->srcfile &&
!strcmp(state->srcfile, srcfile) &&
state->line == line) {
free(srcfile);
return 0;
}
srccode = find_sourceline(srcfile, line, &len);
if (!srccode)
goto out_free_line;
ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
if (state) {
state->srcfile = srcfile;
state->line = line;
}
return ret;
out_free_line:
free(srcfile);
return ret;
}
static int print_srccode(struct thread *thread, u8 cpumode, uint64_t addr)
{
struct addr_location al;
int ret = 0;
addr_location__init(&al);
thread__find_map(thread, cpumode, addr, &al);
if (!al.map)
goto out;
ret = map__fprintf_srccode(al.map, al.addr, stdout,
thread__srccode_state(thread));
if (ret)
ret += printf("\n");
out:
addr_location__exit(&al);
return ret;
}
static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
struct perf_insn *x, u8 *inbuf, int len,
int insn, FILE *fp, int *total_cycles,
struct perf_event_attr *attr)
{
int ilen = 0;
int printed = fprintf(fp, "\t%016" PRIx64 "\t%-30s\t", ip,
dump_insn(x, ip, inbuf, len, &ilen));
if (PRINT_FIELD(BRSTACKINSNLEN))
printed += fprintf(fp, "ilen: %d\t", ilen);
printed += fprintf(fp, "#%s%s%s%s",
en->flags.predicted ? " PRED" : "",
en->flags.mispred ? " MISPRED" : "",
en->flags.in_tx ? " INTX" : "",
en->flags.abort ? " ABORT" : "");
if (en->flags.cycles) {
*total_cycles += en->flags.cycles;
printed += fprintf(fp, " %d cycles [%d]", en->flags.cycles, *total_cycles);
if (insn)
printed += fprintf(fp, " %.2f IPC", (float)insn / en->flags.cycles);
}
return printed + fprintf(fp, "\n");
}
static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
u8 cpumode, int cpu, struct symbol **lastsym,
struct perf_event_attr *attr, FILE *fp)
{
struct addr_location al;
int off, printed = 0, ret = 0;
addr_location__init(&al);
thread__find_map(thread, cpumode, addr, &al);
if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end)
goto out;
al.cpu = cpu;
al.sym = NULL;
if (al.map)
al.sym = map__find_symbol(al.map, al.addr);
if (!al.sym)
goto out;
if (al.addr < al.sym->end)
off = al.addr - al.sym->start;
else
off = al.addr - map__start(al.map) - al.sym->start;
printed += fprintf(fp, "\t%s", al.sym->name);
if (off)
printed += fprintf(fp, "%+d", off);
printed += fprintf(fp, ":");
if (PRINT_FIELD(SRCLINE))
printed += map__fprintf_srcline(al.map, al.addr, "\t", fp);
printed += fprintf(fp, "\n");
*lastsym = al.sym;
ret = printed;
out:
addr_location__exit(&al);
return ret;
}
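/*
 * Added commentary (not from the original source): branch stack entries
 * are ordered newest first, so the function below walks them from the
 * oldest entry forward, disassembling each basic block from
 * entries[i + 1].to up to entries[i].from, and finally the block from
 * entries[0].to up to the sample IP.
 */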
static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
struct thread *thread,
struct perf_event_attr *attr,
struct machine *machine, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
u64 start, end;
int i, insn, len, nr, ilen, printed = 0;
struct perf_insn x;
u8 buffer[MAXBB];
unsigned off;
struct symbol *lastsym = NULL;
int total_cycles = 0;
if (!(br && br->nr))
return 0;
nr = br->nr;
if (max_blocks && nr > max_blocks + 1)
nr = max_blocks + 1;
x.thread = thread;
x.cpu = sample->cpu;
printed += fprintf(fp, "%c", '\n');
/* Handle the first 'from' jump, whose entry point we don't know. */
len = grab_bb(buffer, entries[nr-1].from,
entries[nr-1].from,
machine, thread, &x.is64bit, &x.cpumode, false);
if (len > 0) {
printed += ip__fprintf_sym(entries[nr - 1].from, thread,
x.cpumode, x.cpu, &lastsym, attr, fp);
printed += ip__fprintf_jump(entries[nr - 1].from, &entries[nr - 1],
&x, buffer, len, 0, fp, &total_cycles,
attr);
if (PRINT_FIELD(SRCCODE))
printed += print_srccode(thread, x.cpumode, entries[nr - 1].from);
}
/* Print all blocks */
for (i = nr - 2; i >= 0; i--) {
if (entries[i].from || entries[i].to)
pr_debug("%d: %" PRIx64 "-%" PRIx64 "\n", i,
entries[i].from,
entries[i].to);
start = entries[i + 1].to;
end = entries[i].from;
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false);
/* Patch up missing kernel transfers due to ring filters */
if (len == -ENXIO && i > 0) {
end = entries[--i].from;
pr_debug("\tpatching up to %" PRIx64 "-%" PRIx64 "\n", start, end);
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false);
}
if (len <= 0)
continue;
insn = 0;
for (off = 0; off < (unsigned)len; off += ilen) {
uint64_t ip = start + off;
printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
if (ip == end) {
printed += ip__fprintf_jump(ip, &entries[i], &x, buffer + off, len - off, ++insn, fp,
&total_cycles, attr);
if (PRINT_FIELD(SRCCODE))
printed += print_srccode(thread, x.cpumode, ip);
break;
} else {
ilen = 0;
printed += fprintf(fp, "\t%016" PRIx64 "\t%s", ip,
dump_insn(&x, ip, buffer + off, len - off, &ilen));
if (PRINT_FIELD(BRSTACKINSNLEN))
printed += fprintf(fp, "\tilen: %d", ilen);
printed += fprintf(fp, "\n");
if (ilen == 0)
break;
if (PRINT_FIELD(SRCCODE))
print_srccode(thread, x.cpumode, ip);
insn++;
}
}
if (off != end - start)
printed += fprintf(fp, "\tmismatch of LBR data and executable\n");
}
/*
* Did the sample hit the branch itself? Then we are already done,
* and the branch target has not been executed yet.
*/
if (entries[0].from == sample->ip)
goto out;
if (entries[0].flags.abort)
goto out;
/*
* Print final block up to sample
*
* Due to pipeline delays the LBRs might be missing a branch
* or two, which can result in very large or negative blocks
* between final branch and sample. When this happens just
* continue walking after the last TO until we hit a branch.
*/
start = entries[0].to;
end = sample->ip;
if (end < start) {
/* Missing jump. Scan 128 bytes for the next branch */
end = start + 128;
}
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true);
printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
if (len <= 0) {
/* Print at least last IP if basic block did not work */
len = grab_bb(buffer, sample->ip, sample->ip,
machine, thread, &x.is64bit, &x.cpumode, false);
if (len <= 0)
goto out;
ilen = 0;
printed += fprintf(fp, "\t%016" PRIx64 "\t%s", sample->ip,
dump_insn(&x, sample->ip, buffer, len, &ilen));
if (PRINT_FIELD(BRSTACKINSNLEN))
printed += fprintf(fp, "\tilen: %d", ilen);
printed += fprintf(fp, "\n");
if (PRINT_FIELD(SRCCODE))
print_srccode(thread, x.cpumode, sample->ip);
goto out;
}
for (off = 0; off <= end - start; off += ilen) {
ilen = 0;
printed += fprintf(fp, "\t%016" PRIx64 "\t%s", start + off,
dump_insn(&x, start + off, buffer + off, len - off, &ilen));
if (PRINT_FIELD(BRSTACKINSNLEN))
printed += fprintf(fp, "\tilen: %d", ilen);
printed += fprintf(fp, "\n");
if (ilen == 0)
break;
if (arch_is_branch(buffer + off, len - off, x.is64bit) && start + off != sample->ip) {
/*
* Hit a missing branch. Just stop.
*/
printed += fprintf(fp, "\t... not reaching sample ...\n");
break;
}
if (PRINT_FIELD(SRCCODE))
print_srccode(thread, x.cpumode, start + off);
}
out:
return printed;
}
static int perf_sample__fprintf_addr(struct perf_sample *sample,
struct thread *thread,
struct perf_event_attr *attr, FILE *fp)
{
struct addr_location al;
int printed = fprintf(fp, "%16" PRIx64, sample->addr);
addr_location__init(&al);
if (!sample_addr_correlates_sym(attr))
goto out;
thread__resolve(thread, &al, sample);
if (PRINT_FIELD(SYM)) {
printed += fprintf(fp, " ");
if (PRINT_FIELD(SYMOFFSET))
printed += symbol__fprintf_symname_offs(al.sym, &al, fp);
else
printed += symbol__fprintf_symname(al.sym, fp);
}
if (PRINT_FIELD(DSO))
printed += map__fprintf_dsoname_dsoff(al.map, PRINT_FIELD(DSOFF), al.addr, fp);
out:
addr_location__exit(&al);
return printed;
}
static const char *resolve_branch_sym(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
struct addr_location *al,
struct addr_location *addr_al,
u64 *ip)
{
struct perf_event_attr *attr = &evsel->core.attr;
const char *name = NULL;
if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
if (sample_addr_correlates_sym(attr)) {
if (!addr_al->thread)
thread__resolve(thread, addr_al, sample);
if (addr_al->sym)
name = addr_al->sym->name;
else
*ip = sample->addr;
} else {
*ip = sample->addr;
}
} else if (sample->flags & (PERF_IP_FLAG_RETURN | PERF_IP_FLAG_TRACE_END)) {
if (al->sym)
name = al->sym->name;
else
*ip = sample->ip;
}
return name;
}
static int perf_sample__fprintf_callindent(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
struct addr_location *al,
struct addr_location *addr_al,
FILE *fp)
{
struct perf_event_attr *attr = &evsel->core.attr;
size_t depth = thread_stack__depth(thread, sample->cpu);
const char *name = NULL;
static int spacing;
int len = 0;
int dlen = 0;
u64 ip = 0;
/*
* The 'return' has already been popped off the stack so the depth has
* to be adjusted to match the 'call'.
*/
if (thread__ts(thread) && sample->flags & PERF_IP_FLAG_RETURN)
depth += 1;
name = resolve_branch_sym(sample, evsel, thread, al, addr_al, &ip);
if (PRINT_FIELD(DSO) && !(PRINT_FIELD(IP) || PRINT_FIELD(ADDR))) {
dlen += fprintf(fp, "(");
dlen += map__fprintf_dsoname(al->map, fp);
dlen += fprintf(fp, ")\t");
}
if (name)
len = fprintf(fp, "%*s%s", (int)depth * 4, "", name);
else if (ip)
len = fprintf(fp, "%*s%16" PRIx64, (int)depth * 4, "", ip);
if (len < 0)
return len;
/*
* Try to keep the output length from changing frequently so that the
* output lines up more nicely.
*/
if (len > spacing || (len && len < spacing - 52))
spacing = round_up(len + 4, 32);
if (len < spacing)
len += fprintf(fp, "%*s", spacing - len, "");
return len + dlen;
}
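/*
 * Added commentary (not from the original source): the spacing
 * heuristic above re-rounds to a multiple of 32 only when the printed
 * length grows past 'spacing' or falls more than 52 columns short of
 * it, so the column following the call name stays stable instead of
 * shifting on every change of stack depth.
 */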
__weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
struct thread *thread __maybe_unused,
struct machine *machine __maybe_unused)
{
}
void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
struct machine *machine)
{
if (sample->insn_len == 0 && native_arch)
arch_fetch_insn(sample, thread, machine);
}
static int perf_sample__fprintf_insn(struct perf_sample *sample,
struct perf_event_attr *attr,
struct thread *thread,
struct machine *machine, FILE *fp)
{
int printed = 0;
script_fetch_insn(sample, thread, machine);
if (PRINT_FIELD(INSNLEN))
printed += fprintf(fp, " ilen: %d", sample->insn_len);
if (PRINT_FIELD(INSN) && sample->insn_len) {
int i;
printed += fprintf(fp, " insn:");
for (i = 0; i < sample->insn_len; i++)
printed += fprintf(fp, " %02x", (unsigned char)sample->insn[i]);
}
if (PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN))
printed += perf_sample__fprintf_brstackinsn(sample, thread, attr, machine, fp);
return printed;
}
static int perf_sample__fprintf_ipc(struct perf_sample *sample,
struct perf_event_attr *attr, FILE *fp)
{
unsigned int ipc;
if (!PRINT_FIELD(IPC) || !sample->cyc_cnt || !sample->insn_cnt)
return 0;
ipc = (sample->insn_cnt * 100) / sample->cyc_cnt;
return fprintf(fp, " \t IPC: %u.%02u (%" PRIu64 "/%" PRIu64 ") ",
ipc / 100, ipc % 100, sample->insn_cnt, sample->cyc_cnt);
}
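/*
 * Worked example with illustrative values (added commentary):
 * insn_cnt = 300 and cyc_cnt = 200 give ipc = 300 * 100 / 200 = 150,
 * printed as "IPC: 1.50 (300/200)". Scaling by 100 yields two decimal
 * places without using floating point.
 */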
static int perf_sample__fprintf_bts(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
struct addr_location *al,
struct addr_location *addr_al,
struct machine *machine, FILE *fp)
{
struct perf_event_attr *attr = &evsel->core.attr;
unsigned int type = output_type(attr->type);
bool print_srcline_last = false;
int printed = 0;
if (PRINT_FIELD(CALLINDENT))
printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, addr_al, fp);
/* print branch_from information */
if (PRINT_FIELD(IP)) {
unsigned int print_opts = output[type].print_ip_opts;
struct callchain_cursor *cursor = NULL;
if (symbol_conf.use_callchain && sample->callchain) {
cursor = get_tls_callchain_cursor();
if (thread__resolve_callchain(al->thread, cursor, evsel,
sample, NULL, NULL,
scripting_max_stack))
cursor = NULL;
}
if (cursor == NULL) {
printed += fprintf(fp, " ");
if (print_opts & EVSEL__PRINT_SRCLINE) {
print_srcline_last = true;
print_opts &= ~EVSEL__PRINT_SRCLINE;
}
} else
printed += fprintf(fp, "\n");
printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor,
symbol_conf.bt_stop_list, fp);
}
/* print branch_to information */
if (PRINT_FIELD(ADDR) ||
((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
!output[type].user_set)) {
printed += fprintf(fp, " => ");
printed += perf_sample__fprintf_addr(sample, thread, attr, fp);
}
printed += perf_sample__fprintf_ipc(sample, attr, fp);
if (print_srcline_last)
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
printed += fprintf(fp, "\n");
if (PRINT_FIELD(SRCCODE)) {
int ret = map__fprintf_srccode(al->map, al->addr, stdout,
thread__srccode_state(thread));
if (ret) {
printed += ret;
printed += printf("\n");
}
}
return printed;
}
static struct {
u32 flags;
const char *name;
} sample_flags[] = {
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "jcc"},
{PERF_IP_FLAG_BRANCH, "jmp"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT, "int"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT, "iret"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET, "syscall"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET, "sysret"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "async"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | PERF_IP_FLAG_INTERRUPT, "hw int"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "tx abrt"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "tr strt"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "tr end"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMENTRY, "vmentry"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMEXIT, "vmexit"},
{0, NULL}
};
static const char *sample_flags_to_name(u32 flags)
{
int i;
for (i = 0; sample_flags[i].name ; i++) {
if (sample_flags[i].flags == flags)
return sample_flags[i].name;
}
return NULL;
}
int perf_sample__sprintf_flags(u32 flags, char *str, size_t sz)
{
u32 xf = PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_INTR_DISABLE |
PERF_IP_FLAG_INTR_TOGGLE;
const char *chars = PERF_IP_FLAG_CHARS;
const size_t n = strlen(PERF_IP_FLAG_CHARS);
const char *name = NULL;
size_t i, pos = 0;
char xs[16] = {0};
if (flags & xf)
snprintf(xs, sizeof(xs), "(%s%s%s)",
flags & PERF_IP_FLAG_IN_TX ? "x" : "",
flags & PERF_IP_FLAG_INTR_DISABLE ? "D" : "",
flags & PERF_IP_FLAG_INTR_TOGGLE ? "t" : "");
name = sample_flags_to_name(flags & ~xf);
if (name)
return snprintf(str, sz, "%-15s%6s", name, xs);
if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
name = sample_flags_to_name(flags & ~(xf | PERF_IP_FLAG_TRACE_BEGIN));
if (name)
return snprintf(str, sz, "tr strt %-7s%6s", name, xs);
}
if (flags & PERF_IP_FLAG_TRACE_END) {
name = sample_flags_to_name(flags & ~(xf | PERF_IP_FLAG_TRACE_END));
if (name)
return snprintf(str, sz, "tr end %-7s%6s", name, xs);
}
for (i = 0; i < n; i++, flags >>= 1) {
if ((flags & 1) && pos < sz)
str[pos++] = chars[i];
}
for (; i < 32; i++, flags >>= 1) {
if ((flags & 1) && pos < sz)
str[pos++] = '?';
}
if (pos < sz)
str[pos] = 0;
return pos;
}
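/*
 * Illustrative outputs (added commentary, not from the original
 * source): a plain call prints as "call", a call inside a transaction
 * as "call" followed by "(x)", and a combination with no symbolic name
 * falls back to the per-bit PERF_IP_FLAG_CHARS encoding, with '?' for
 * any set bits beyond the known characters.
 */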
static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
{
char str[SAMPLE_FLAGS_BUF_SIZE];
perf_sample__sprintf_flags(flags, str, sizeof(str));
return fprintf(fp, " %-21s ", str);
}
struct printer_data {
int line_no;
bool hit_nul;
bool is_printable;
};
static int sample__fprintf_bpf_output(enum binary_printer_ops op,
unsigned int val,
void *extra, FILE *fp)
{
unsigned char ch = (unsigned char)val;
struct printer_data *printer_data = extra;
int printed = 0;
switch (op) {
case BINARY_PRINT_DATA_BEGIN:
printed += fprintf(fp, "\n");
break;
case BINARY_PRINT_LINE_BEGIN:
printed += fprintf(fp, "%17s", !printer_data->line_no ? "BPF output:" :
" ");
break;
case BINARY_PRINT_ADDR:
printed += fprintf(fp, " %04x:", val);
break;
case BINARY_PRINT_NUM_DATA:
printed += fprintf(fp, " %02x", val);
break;
case BINARY_PRINT_NUM_PAD:
printed += fprintf(fp, " ");
break;
case BINARY_PRINT_SEP:
printed += fprintf(fp, " ");
break;
case BINARY_PRINT_CHAR_DATA:
if (printer_data->hit_nul && ch)
printer_data->is_printable = false;
if (!isprint(ch)) {
printed += fprintf(fp, "%c", '.');
if (!printer_data->is_printable)
break;
if (ch == '\0')
printer_data->hit_nul = true;
else
printer_data->is_printable = false;
} else {
printed += fprintf(fp, "%c", ch);
}
break;
case BINARY_PRINT_CHAR_PAD:
printed += fprintf(fp, " ");
break;
case BINARY_PRINT_LINE_END:
printed += fprintf(fp, "\n");
printer_data->line_no++;
break;
case BINARY_PRINT_DATA_END:
default:
break;
}
return printed;
}
static int perf_sample__fprintf_bpf_output(struct perf_sample *sample, FILE *fp)
{
unsigned int nr_bytes = sample->raw_size;
struct printer_data printer_data = {0, false, true};
int printed = binary__fprintf(sample->raw_data, nr_bytes, 8,
sample__fprintf_bpf_output, &printer_data, fp);
if (printer_data.is_printable && printer_data.hit_nul)
printed += fprintf(fp, "%17s \"%s\"\n", "BPF string:", (char *)(sample->raw_data));
return printed;
}
static int perf_sample__fprintf_spacing(int len, int spacing, FILE *fp)
{
if (len > 0 && len < spacing)
return fprintf(fp, "%*s", spacing - len, "");
return 0;
}
static int perf_sample__fprintf_pt_spacing(int len, FILE *fp)
{
return perf_sample__fprintf_spacing(len, 34, fp);
}
/* Return true if a value contains only printable ASCII characters, NUL-padded to the full width */
static bool ptw_is_prt(u64 val)
{
char c;
u32 i;
for (i = 0; i < sizeof(val); i++) {
c = ((char *)&val)[i];
if (!c)
break;
if (!isprint(c) || !isascii(c))
return false;
}
for (; i < sizeof(val); i++) {
c = ((char *)&val)[i];
if (c)
return false;
}
return true;
}
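/*
 * Illustrative example (added commentary): the little-endian payload
 * 0x0000000066726570 decodes to the bytes "perf\0\0\0\0", which is
 * printable text NUL-padded to 8 bytes, so ptw_is_prt() returns true
 * and the caller also prints the payload as a string.
 */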
static int perf_sample__fprintf_synth_ptwrite(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
char str[sizeof(u64) + 1] = "";
int len;
u64 val;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
val = le64_to_cpu(data->payload);
if (ptw_is_prt(val)) {
memcpy(str, &val, sizeof(val));
str[sizeof(val)] = 0;
}
len = fprintf(fp, " IP: %u payload: %#" PRIx64 " %s ",
data->ip, val, str);
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
static int perf_sample__fprintf_synth_mwait(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
len = fprintf(fp, " hints: %#x extensions: %#x ",
data->hints, data->extensions);
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
static int perf_sample__fprintf_synth_pwre(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
len = fprintf(fp, " hw: %u cstate: %u sub-cstate: %u ",
data->hw, data->cstate, data->subcstate);
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
static int perf_sample__fprintf_synth_exstop(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
len = fprintf(fp, " IP: %u ", data->ip);
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
static int perf_sample__fprintf_synth_pwrx(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
len = fprintf(fp, " deepest cstate: %u last cstate: %u wake reason: %#x ",
data->deepest_cstate, data->last_cstate,
data->wake_reason);
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
static int perf_sample__fprintf_synth_cbr(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
unsigned int percent, freq;
int len;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
freq = (le32_to_cpu(data->freq) + 500) / 1000;
len = fprintf(fp, " cbr: %2u freq: %4u MHz ", data->cbr, freq);
if (data->max_nonturbo) {
percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10;
len += fprintf(fp, "(%3u%%) ", percent);
}
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
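/*
 * Worked example with illustrative values (added commentary, assuming
 * data->freq is reported in kHz): freq = (2900123 + 500) / 1000 =
 * 2900 MHz, and with cbr = 29 and max_nonturbo = 24 the ratio is
 * (5 + 1000 * 29 / 24) / 10 = 121%. The +500 and +5 terms implement
 * round-to-nearest in integer arithmetic.
 */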
static int perf_sample__fprintf_synth_psb(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_psb *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
len = fprintf(fp, " psb offs: %#" PRIx64, data->offset);
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
/* Intel PT Event Trace */
static int perf_sample__fprintf_synth_evt(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_evt *data = perf_sample__synth_ptr(sample);
const char *cfe[32] = {NULL, "INTR", "IRET", "SMI", "RSM", "SIPI",
"INIT", "VMENTRY", "VMEXIT", "VMEXIT_INTR",
"SHUTDOWN", NULL, "UINTR", "UIRET"};
const char *evd[64] = {"PFA", "VMXQ", "VMXR"};
const char *s;
int len, i;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
s = cfe[data->type];
if (s) {
len = fprintf(fp, " cfe: %s IP: %d vector: %u",
s, data->ip, data->vector);
} else {
len = fprintf(fp, " cfe: %u IP: %d vector: %u",
data->type, data->ip, data->vector);
}
for (i = 0; i < data->evd_cnt; i++) {
unsigned int et = data->evd[i].evd_type & 0x3f;
s = evd[et];
if (s) {
len += fprintf(fp, " %s: %#" PRIx64,
s, data->evd[i].payload);
} else {
len += fprintf(fp, " EVD_%u: %#" PRIx64,
et, data->evd[i].payload);
}
}
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
static int perf_sample__fprintf_synth_iflag_chg(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_iflag_chg *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
len = fprintf(fp, " IFLAG: %d->%d %s branch", !data->iflag, data->iflag,
data->via_branch ? "via" : "non");
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
static int perf_sample__fprintf_synth(struct perf_sample *sample,
struct evsel *evsel, FILE *fp)
{
switch (evsel->core.attr.config) {
case PERF_SYNTH_INTEL_PTWRITE:
return perf_sample__fprintf_synth_ptwrite(sample, fp);
case PERF_SYNTH_INTEL_MWAIT:
return perf_sample__fprintf_synth_mwait(sample, fp);
case PERF_SYNTH_INTEL_PWRE:
return perf_sample__fprintf_synth_pwre(sample, fp);
case PERF_SYNTH_INTEL_EXSTOP:
return perf_sample__fprintf_synth_exstop(sample, fp);
case PERF_SYNTH_INTEL_PWRX:
return perf_sample__fprintf_synth_pwrx(sample, fp);
case PERF_SYNTH_INTEL_CBR:
return perf_sample__fprintf_synth_cbr(sample, fp);
case PERF_SYNTH_INTEL_PSB:
return perf_sample__fprintf_synth_psb(sample, fp);
case PERF_SYNTH_INTEL_EVT:
return perf_sample__fprintf_synth_evt(sample, fp);
case PERF_SYNTH_INTEL_IFLAG_CHG:
return perf_sample__fprintf_synth_iflag_chg(sample, fp);
default:
break;
}
return 0;
}
static int evlist__max_name_len(struct evlist *evlist)
{
struct evsel *evsel;
int max = 0;
evlist__for_each_entry(evlist, evsel) {
int len = strlen(evsel__name(evsel));
max = MAX(len, max);
}
return max;
}
static int data_src__fprintf(u64 data_src, FILE *fp)
{
struct mem_info mi = { .data_src.val = data_src };
char decode[100];
char out[100];
static int maxlen;
int len;
perf_script__meminfo_scnprintf(decode, 100, &mi);
len = scnprintf(out, 100, "%16" PRIx64 " %s", data_src, decode);
if (maxlen < len)
maxlen = len;
return fprintf(fp, "%-*s", maxlen, out);
}
struct metric_ctx {
struct perf_sample *sample;
struct thread *thread;
struct evsel *evsel;
FILE *fp;
};
static void script_print_metric(struct perf_stat_config *config __maybe_unused,
void *ctx, const char *color,
const char *fmt,
const char *unit, double val)
{
struct metric_ctx *mctx = ctx;
if (!fmt)
return;
perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
PERF_RECORD_SAMPLE, mctx->fp);
fputs("\tmetric: ", mctx->fp);
if (color)
color_fprintf(mctx->fp, color, fmt, val);
else
printf(fmt, val);
fprintf(mctx->fp, " %s\n", unit);
}
static void script_new_line(struct perf_stat_config *config __maybe_unused,
void *ctx)
{
struct metric_ctx *mctx = ctx;
perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
PERF_RECORD_SAMPLE, mctx->fp);
fputs("\tmetric: ", mctx->fp);
}
static void perf_sample__fprint_metric(struct perf_script *script,
struct thread *thread,
struct evsel *evsel,
struct perf_sample *sample,
FILE *fp)
{
struct evsel *leader = evsel__leader(evsel);
struct perf_stat_output_ctx ctx = {
.print_metric = script_print_metric,
.new_line = script_new_line,
.ctx = &(struct metric_ctx) {
.sample = sample,
.thread = thread,
.evsel = evsel,
.fp = fp,
},
.force_header = false,
};
struct evsel *ev2;
u64 val;
if (!evsel->stats)
evlist__alloc_stats(&stat_config, script->session->evlist, /*alloc_raw=*/false);
if (evsel_script(leader)->gnum++ == 0)
perf_stat__reset_shadow_stats();
val = sample->period * evsel->scale;
evsel_script(evsel)->val = val;
if (evsel_script(leader)->gnum == leader->core.nr_members) {
for_each_group_member (ev2, leader) {
perf_stat__print_shadow_stats(&stat_config, ev2,
evsel_script(ev2)->val,
sample->cpu,
&ctx,
NULL);
}
evsel_script(leader)->gnum = 0;
}
}
static bool show_event(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
struct addr_location *al,
struct addr_location *addr_al)
{
int depth = thread_stack__depth(thread, sample->cpu);
if (!symbol_conf.graph_function)
return true;
if (thread__filter(thread)) {
if (depth <= thread__filter_entry_depth(thread)) {
thread__set_filter(thread, false);
return false;
}
return true;
} else {
const char *s = symbol_conf.graph_function;
u64 ip;
const char *name = resolve_branch_sym(sample, evsel, thread, al, addr_al,
&ip);
unsigned nlen;
if (!name)
return false;
nlen = strlen(name);
while (*s) {
unsigned len = strcspn(s, ",");
if (nlen == len && !strncmp(name, s, len)) {
thread__set_filter(thread, true);
thread__set_filter_entry_depth(thread, depth);
return true;
}
s += len;
if (*s == ',')
s++;
}
return false;
}
}
static void process_event(struct perf_script *script,
struct perf_sample *sample, struct evsel *evsel,
struct addr_location *al,
struct addr_location *addr_al,
struct machine *machine)
{
struct thread *thread = al->thread;
struct perf_event_attr *attr = &evsel->core.attr;
unsigned int type = output_type(attr->type);
struct evsel_script *es = evsel->priv;
FILE *fp = es->fp;
char str[PAGE_SIZE_NAME_LEN];
const char *arch = perf_env__arch(machine->env);
if (output[type].fields == 0)
return;
++es->samples;
perf_sample__fprintf_start(script, sample, thread, evsel,
PERF_RECORD_SAMPLE, fp);
if (PRINT_FIELD(PERIOD))
fprintf(fp, "%10" PRIu64 " ", sample->period);
if (PRINT_FIELD(EVNAME)) {
const char *evname = evsel__name(evsel);
if (!script->name_width)
script->name_width = evlist__max_name_len(script->session->evlist);
fprintf(fp, "%*s: ", script->name_width, evname ?: "[unknown]");
}
if (print_flags)
perf_sample__fprintf_flags(sample->flags, fp);
if (is_bts_event(attr)) {
perf_sample__fprintf_bts(sample, evsel, thread, al, addr_al, machine, fp);
return;
}
#ifdef HAVE_LIBTRACEEVENT
if (PRINT_FIELD(TRACE) && sample->raw_data) {
event_format__fprintf(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size, fp);
}
#endif
if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
perf_sample__fprintf_synth(sample, evsel, fp);
if (PRINT_FIELD(ADDR))
perf_sample__fprintf_addr(sample, thread, attr, fp);
if (PRINT_FIELD(DATA_SRC))
data_src__fprintf(sample->data_src, fp);
if (PRINT_FIELD(WEIGHT))
fprintf(fp, "%16" PRIu64, sample->weight);
if (PRINT_FIELD(INS_LAT))
fprintf(fp, "%16" PRIu16, sample->ins_lat);
if (PRINT_FIELD(RETIRE_LAT))
fprintf(fp, "%16" PRIu16, sample->retire_lat);
if (PRINT_FIELD(CGROUP)) {
const char *cgrp_name;
struct cgroup *cgrp = cgroup__find(machine->env,
sample->cgroup);
if (cgrp != NULL)
cgrp_name = cgrp->name;
else
cgrp_name = "unknown";
fprintf(fp, " %s", cgrp_name);
}
if (PRINT_FIELD(IP)) {
struct callchain_cursor *cursor = NULL;
if (script->stitch_lbr)
thread__set_lbr_stitch_enable(al->thread, true);
if (symbol_conf.use_callchain && sample->callchain) {
cursor = get_tls_callchain_cursor();
if (thread__resolve_callchain(al->thread, cursor, evsel,
sample, NULL, NULL,
scripting_max_stack))
cursor = NULL;
}
fputc(cursor ? '\n' : ' ', fp);
sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor,
symbol_conf.bt_stop_list, fp);
}
if (PRINT_FIELD(IREGS))
perf_sample__fprintf_iregs(sample, attr, arch, fp);
if (PRINT_FIELD(UREGS))
perf_sample__fprintf_uregs(sample, attr, arch, fp);
if (PRINT_FIELD(BRSTACK))
perf_sample__fprintf_brstack(sample, thread, attr, fp);
else if (PRINT_FIELD(BRSTACKSYM))
perf_sample__fprintf_brstacksym(sample, thread, attr, fp);
else if (PRINT_FIELD(BRSTACKOFF))
perf_sample__fprintf_brstackoff(sample, thread, attr, fp);
if (evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
perf_sample__fprintf_bpf_output(sample, fp);
perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
if (PRINT_FIELD(PHYS_ADDR))
fprintf(fp, "%16" PRIx64, sample->phys_addr);
if (PRINT_FIELD(DATA_PAGE_SIZE))
fprintf(fp, " %s", get_page_size_name(sample->data_page_size, str));
if (PRINT_FIELD(CODE_PAGE_SIZE))
fprintf(fp, " %s", get_page_size_name(sample->code_page_size, str));
perf_sample__fprintf_ipc(sample, attr, fp);
fprintf(fp, "\n");
if (PRINT_FIELD(SRCCODE)) {
if (map__fprintf_srccode(al->map, al->addr, stdout,
thread__srccode_state(thread)))
printf("\n");
}
if (PRINT_FIELD(METRIC))
perf_sample__fprint_metric(script, thread, evsel, sample, fp);
if (verbose > 0)
fflush(fp);
}
static struct scripting_ops *scripting_ops;
static void __process_stat(struct evsel *counter, u64 tstamp)
{
int nthreads = perf_thread_map__nr(counter->core.threads);
int idx, thread;
struct perf_cpu cpu;
static int header_printed;
if (!header_printed) {
printf("%3s %8s %15s %15s %15s %15s %s\n",
"CPU", "THREAD", "VAL", "ENA", "RUN", "TIME", "EVENT");
header_printed = 1;
}
for (thread = 0; thread < nthreads; thread++) {
perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) {
struct perf_counts_values *counts;
counts = perf_counts(counter->counts, idx, thread);
printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n",
cpu.cpu,
perf_thread_map__pid(counter->core.threads, thread),
counts->val,
counts->ena,
counts->run,
tstamp,
evsel__name(counter));
}
}
}
static void process_stat(struct evsel *counter, u64 tstamp)
{
if (scripting_ops && scripting_ops->process_stat)
scripting_ops->process_stat(&stat_config, counter, tstamp);
else
__process_stat(counter, tstamp);
}
static void process_stat_interval(u64 tstamp)
{
if (scripting_ops && scripting_ops->process_stat_interval)
scripting_ops->process_stat_interval(tstamp);
}
static void setup_scripting(void)
{
#ifdef HAVE_LIBTRACEEVENT
setup_perl_scripting();
#endif
setup_python_scripting();
}
static int flush_scripting(void)
{
return scripting_ops ? scripting_ops->flush_script() : 0;
}
static int cleanup_scripting(void)
{
pr_debug("\nperf script stopped\n");
return scripting_ops ? scripting_ops->stop_script() : 0;
}
static bool filter_cpu(struct perf_sample *sample)
{
if (cpu_list && sample->cpu != (u32)-1)
return !test_bit(sample->cpu, cpu_bitmap);
return false;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct addr_location al;
struct addr_location addr_al;
int ret = 0;
/* Set thread to NULL to indicate addr_al and al are not initialized */
addr_location__init(&al);
addr_location__init(&addr_al);
ret = dlfilter__filter_event_early(dlfilter, event, sample, evsel, machine, &al, &addr_al);
if (ret) {
if (ret > 0)
ret = 0;
goto out_put;
}
if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
sample->time)) {
goto out_put;
}
if (debug_mode) {
if (sample->time < last_timestamp) {
pr_err("Samples misordered, previous: %" PRIu64
" this: %" PRIu64 "\n", last_timestamp,
sample->time);
nr_unordered++;
}
last_timestamp = sample->time;
goto out_put;
}
if (filter_cpu(sample))
goto out_put;
if (!al.thread && machine__resolve(machine, &al, sample) < 0) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
ret = -1;
goto out_put;
}
if (al.filtered)
goto out_put;
if (!show_event(sample, evsel, al.thread, &al, &addr_al))
goto out_put;
if (evswitch__discard(&scr->evswitch, evsel))
goto out_put;
ret = dlfilter__filter_event(dlfilter, event, sample, evsel, machine, &al, &addr_al);
if (ret) {
if (ret > 0)
ret = 0;
goto out_put;
}
if (scripting_ops) {
struct addr_location *addr_al_ptr = NULL;
if ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
sample_addr_correlates_sym(&evsel->core.attr)) {
if (!addr_al.thread)
thread__resolve(al.thread, &addr_al, sample);
addr_al_ptr = &addr_al;
}
scripting_ops->process_event(event, sample, evsel, &al, addr_al_ptr);
} else {
process_event(scr, sample, evsel, &al, &addr_al, machine);
}
out_put:
addr_location__exit(&addr_al);
addr_location__exit(&al);
return ret;
}
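/*
 * Added commentary (not from the original source): sample processing
 * above is layered as early dlfilter -> time-range skip -> debug-mode
 * ordering check -> CPU filter -> address resolution -> graph-function
 * filter -> event switch -> late dlfilter, before the sample reaches
 * either a scripting engine or the built-in printer.
 */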
// Used when scr->per_event_dump is not set
static struct evsel_script es_stdout;
static int process_attr(struct perf_tool *tool, union perf_event *event,
struct evlist **pevlist)
{
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct evlist *evlist;
struct evsel *evsel, *pos;
u64 sample_type;
int err;
err = perf_event__process_attr(tool, event, pevlist);
if (err)
return err;
evlist = *pevlist;
evsel = evlist__last(*pevlist);
if (!evsel->priv) {
if (scr->per_event_dump) {
evsel->priv = evsel_script__new(evsel, scr->session->data);
if (!evsel->priv)
return -ENOMEM;
} else { // Replicate what is done in perf_script__setup_per_event_dump()
es_stdout.fp = stdout;
evsel->priv = &es_stdout;
}
}
if (evsel->core.attr.type >= PERF_TYPE_MAX &&
evsel->core.attr.type != PERF_TYPE_SYNTH)
return 0;
evlist__for_each_entry(evlist, pos) {
if (pos->core.attr.type == evsel->core.attr.type && pos != evsel)
return 0;
}
if (evsel->core.attr.sample_type) {
err = evsel__check_attr(evsel, scr->session);
if (err)
return err;
}
/*
* Check if we need to enable callchains based
* on the events' sample_type.
*/
sample_type = evlist__combined_sample_type(evlist);
callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env));
/* Enable fields for callchain entries */
if (symbol_conf.use_callchain &&
(sample_type & PERF_SAMPLE_CALLCHAIN ||
sample_type & PERF_SAMPLE_BRANCH_STACK ||
(sample_type & PERF_SAMPLE_REGS_USER &&
sample_type & PERF_SAMPLE_STACK_USER))) {
int type = output_type(evsel->core.attr.type);
if (!(output[type].user_unset_fields & PERF_OUTPUT_IP))
output[type].fields |= PERF_OUTPUT_IP;
if (!(output[type].user_unset_fields & PERF_OUTPUT_SYM))
output[type].fields |= PERF_OUTPUT_SYM;
}
set_print_ip_opts(&evsel->core.attr);
return 0;
}
static int print_event_with_time(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine,
pid_t pid, pid_t tid, u64 timestamp)
{
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = evlist__id2evsel(session->evlist, sample->id);
struct thread *thread = NULL;
if (evsel && !evsel->core.attr.sample_id_all) {
sample->cpu = 0;
sample->time = timestamp;
sample->pid = pid;
sample->tid = tid;
}
if (filter_cpu(sample))
return 0;
if (tid != -1)
thread = machine__findnew_thread(machine, pid, tid);
if (evsel) {
perf_sample__fprintf_start(script, sample, thread, evsel,
event->header.type, stdout);
}
perf_event__fprintf(event, machine, stdout);
thread__put(thread);
return 0;
}
static int print_event(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine,
pid_t pid, pid_t tid)
{
return print_event_with_time(tool, event, sample, machine, pid, tid, 0);
}
static int process_comm_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
if (perf_event__process_comm(tool, event, sample, machine) < 0)
return -1;
return print_event(tool, event, sample, machine, event->comm.pid,
event->comm.tid);
}
static int process_namespaces_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
if (perf_event__process_namespaces(tool, event, sample, machine) < 0)
return -1;
return print_event(tool, event, sample, machine, event->namespaces.pid,
event->namespaces.tid);
}
static int process_cgroup_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
if (perf_event__process_cgroup(tool, event, sample, machine) < 0)
return -1;
return print_event(tool, event, sample, machine, sample->pid,
sample->tid);
}
static int process_fork_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
if (perf_event__process_fork(tool, event, sample, machine) < 0)
return -1;
return print_event_with_time(tool, event, sample, machine,
event->fork.pid, event->fork.tid,
event->fork.time);
}
static int process_exit_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
/* Print before 'exit' deletes anything */
if (print_event_with_time(tool, event, sample, machine, event->fork.pid,
event->fork.tid, event->fork.time))
return -1;
return perf_event__process_exit(tool, event, sample, machine);
}
static int process_mmap_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
if (perf_event__process_mmap(tool, event, sample, machine) < 0)
return -1;
return print_event(tool, event, sample, machine, event->mmap.pid,
event->mmap.tid);
}
static int process_mmap2_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
if (perf_event__process_mmap2(tool, event, sample, machine) < 0)
return -1;
return print_event(tool, event, sample, machine, event->mmap2.pid,
event->mmap2.tid);
}
static int process_switch_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (perf_event__process_switch(tool, event, sample, machine) < 0)
return -1;
if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
scripting_ops->process_switch(event, sample, machine);
if (!script->show_switch_events)
return 0;
return print_event(tool, event, sample, machine, sample->pid,
sample->tid);
}
static int process_auxtrace_error(struct perf_session *session,
union perf_event *event)
{
if (scripting_ops && scripting_ops->process_auxtrace_error) {
scripting_ops->process_auxtrace_error(session, event);
return 0;
}
return perf_event__process_auxtrace_error(session, event);
}
static int
process_lost_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return print_event(tool, event, sample, machine, sample->pid,
sample->tid);
}
static int
process_throttle_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
if (scripting_ops && scripting_ops->process_throttle)
scripting_ops->process_throttle(event, sample, machine);
return 0;
}
static int
process_finished_round_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct ordered_events *oe __maybe_unused)
{
perf_event__fprintf(event, NULL, stdout);
return 0;
}
static int
process_bpf_events(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
if (machine__process_ksymbol(machine, event, sample) < 0)
return -1;
return print_event(tool, event, sample, machine, sample->pid,
sample->tid);
}
static int process_text_poke_events(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
if (perf_event__process_text_poke(tool, event, sample, machine) < 0)
return -1;
return print_event(tool, event, sample, machine, sample->pid,
sample->tid);
}
static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
}
static void perf_script__fclose_per_event_dump(struct perf_script *script)
{
struct evlist *evlist = script->session->evlist;
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (!evsel->priv)
break;
evsel_script__delete(evsel->priv);
evsel->priv = NULL;
}
}
static int perf_script__fopen_per_event_dump(struct perf_script *script)
{
struct evsel *evsel;
evlist__for_each_entry(script->session->evlist, evsel) {
/*
* Already set up? I.e. we may be called twice in cases like
* Intel PT, once for the intel_pt// and dummy events, then
* for the evsels synthesized from the auxtrace info.
*
* See perf_script__process_auxtrace_info.
*/
if (evsel->priv != NULL)
continue;
evsel->priv = evsel_script__new(evsel, script->session->data);
if (evsel->priv == NULL)
goto out_err_fclose;
}
return 0;
out_err_fclose:
perf_script__fclose_per_event_dump(script);
return -1;
}
static int perf_script__setup_per_event_dump(struct perf_script *script)
{
struct evsel *evsel;
if (script->per_event_dump)
return perf_script__fopen_per_event_dump(script);
es_stdout.fp = stdout;
evlist__for_each_entry(script->session->evlist, evsel)
evsel->priv = &es_stdout;
return 0;
}
static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
{
struct evsel *evsel;
evlist__for_each_entry(script->session->evlist, evsel) {
struct evsel_script *es = evsel->priv;
evsel_script__fprintf(es, stdout);
evsel_script__delete(es);
evsel->priv = NULL;
}
}
static void perf_script__exit(struct perf_script *script)
{
perf_thread_map__put(script->threads);
perf_cpu_map__put(script->cpus);
}
static int __cmd_script(struct perf_script *script)
{
int ret;
signal(SIGINT, sig_handler);
/* override event processing functions */
if (script->show_task_events) {
script->tool.comm = process_comm_event;
script->tool.fork = process_fork_event;
script->tool.exit = process_exit_event;
}
if (script->show_mmap_events) {
script->tool.mmap = process_mmap_event;
script->tool.mmap2 = process_mmap2_event;
}
if (script->show_switch_events || (scripting_ops && scripting_ops->process_switch))
script->tool.context_switch = process_switch_event;
if (scripting_ops && scripting_ops->process_auxtrace_error)
script->tool.auxtrace_error = process_auxtrace_error;
if (script->show_namespace_events)
script->tool.namespaces = process_namespaces_event;
if (script->show_cgroup_events)
script->tool.cgroup = process_cgroup_event;
if (script->show_lost_events)
script->tool.lost = process_lost_event;
if (script->show_round_events) {
script->tool.ordered_events = false;
script->tool.finished_round = process_finished_round_event;
}
if (script->show_bpf_events) {
script->tool.ksymbol = process_bpf_events;
script->tool.bpf = process_bpf_events;
}
if (script->show_text_poke_events) {
script->tool.ksymbol = process_bpf_events;
script->tool.text_poke = process_text_poke_events;
}
if (perf_script__setup_per_event_dump(script)) {
pr_err("Couldn't create the per event dump files\n");
return -1;
}
ret = perf_session__process_events(script->session);
if (script->per_event_dump)
perf_script__exit_per_event_dump_stats(script);
if (debug_mode)
pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
return ret;
}
struct script_spec {
struct list_head node;
struct scripting_ops *ops;
char spec[];
};
static LIST_HEAD(script_specs);
static struct script_spec *script_spec__new(const char *spec,
struct scripting_ops *ops)
{
struct script_spec *s = malloc(sizeof(*s) + strlen(spec) + 1);
if (s != NULL) {
strcpy(s->spec, spec);
s->ops = ops;
}
return s;
}
static void script_spec__add(struct script_spec *s)
{
list_add_tail(&s->node, &script_specs);
}
static struct script_spec *script_spec__find(const char *spec)
{
struct script_spec *s;
list_for_each_entry(s, &script_specs, node)
if (strcasecmp(s->spec, spec) == 0)
return s;
return NULL;
}
int script_spec_register(const char *spec, struct scripting_ops *ops)
{
struct script_spec *s;
s = script_spec__find(spec);
if (s)
return -1;
s = script_spec__new(spec, ops);
if (!s)
return -1;
else
script_spec__add(s);
return 0;
}
static struct scripting_ops *script_spec__lookup(const char *spec)
{
struct script_spec *s = script_spec__find(spec);
if (!s)
return NULL;
return s->ops;
}
static void list_available_languages(void)
{
struct script_spec *s;
fprintf(stderr, "\n");
fprintf(stderr, "Scripting language extensions (used in "
"perf script -s [spec:]script.[spec]):\n\n");
list_for_each_entry(s, &script_specs, node)
fprintf(stderr, " %-42s [%s]\n", s->spec, s->ops->name);
fprintf(stderr, "\n");
}
/* Find script file relative to current directory or exec path */
static char *find_script(const char *script)
{
char path[PATH_MAX];
if (!scripting_ops) {
const char *ext = strrchr(script, '.');
if (!ext)
return NULL;
scripting_ops = script_spec__lookup(++ext);
if (!scripting_ops)
return NULL;
}
if (access(script, R_OK)) {
char *exec_path = get_argv_exec_path();
if (!exec_path)
return NULL;
snprintf(path, sizeof(path), "%s/scripts/%s/%s",
exec_path, scripting_ops->dirname, script);
free(exec_path);
script = path;
if (access(script, R_OK))
return NULL;
}
return strdup(script);
}
static int parse_scriptname(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
{
char spec[PATH_MAX];
const char *script, *ext;
int len;
if (strcmp(str, "lang") == 0) {
list_available_languages();
exit(0);
}
script = strchr(str, ':');
if (script) {
len = script - str;
if (len >= PATH_MAX) {
fprintf(stderr, "invalid language specifier");
return -1;
}
strncpy(spec, str, len);
spec[len] = '\0';
scripting_ops = script_spec__lookup(spec);
if (!scripting_ops) {
fprintf(stderr, "invalid language specifier");
return -1;
}
script++;
} else {
script = str;
ext = strrchr(script, '.');
if (!ext) {
fprintf(stderr, "invalid script extension");
return -1;
}
scripting_ops = script_spec__lookup(++ext);
if (!scripting_ops) {
fprintf(stderr, "invalid script extension");
return -1;
}
}
script_name = find_script(script);
if (!script_name)
script_name = strdup(script);
return 0;
}
static int parse_output_fields(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
{
char *tok, *strtok_saveptr = NULL;
int i, imax = ARRAY_SIZE(all_output_options);
int j;
int rc = 0;
char *str = strdup(arg);
int type = -1;
enum { DEFAULT, SET, ADD, REMOVE } change = DEFAULT;
if (!str)
return -ENOMEM;
/* first word can state for which event type the user is specifying
* the fields. If no type is given, the specified fields apply to all
* event types found in the file, minus the fields invalid for a type.
*/
tok = strchr(str, ':');
if (tok) {
*tok = '\0';
tok++;
if (!strcmp(str, "hw"))
type = PERF_TYPE_HARDWARE;
else if (!strcmp(str, "sw"))
type = PERF_TYPE_SOFTWARE;
else if (!strcmp(str, "trace"))
type = PERF_TYPE_TRACEPOINT;
else if (!strcmp(str, "raw"))
type = PERF_TYPE_RAW;
else if (!strcmp(str, "break"))
type = PERF_TYPE_BREAKPOINT;
else if (!strcmp(str, "synth"))
type = OUTPUT_TYPE_SYNTH;
else {
fprintf(stderr, "Invalid event type in field string.\n");
rc = -EINVAL;
goto out;
}
if (output[type].user_set)
pr_warning("Overriding previous field request for %s events.\n",
event_type(type));
/* Don't override defaults for +- */
if (strchr(tok, '+') || strchr(tok, '-'))
goto parse;
output[type].fields = 0;
output[type].user_set = true;
output[type].wildcard_set = false;
} else {
tok = str;
if (strlen(str) == 0) {
fprintf(stderr,
"Cannot set fields to 'none' for all event types.\n");
rc = -EINVAL;
goto out;
}
/* Don't override defaults for +- */
if (strchr(str, '+') || strchr(str, '-'))
goto parse;
if (output_set_by_user())
pr_warning("Overriding previous field request for all events.\n");
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
output[j].fields = 0;
output[j].user_set = true;
output[j].wildcard_set = true;
}
}
parse:
for (tok = strtok_r(tok, ",", &strtok_saveptr); tok; tok = strtok_r(NULL, ",", &strtok_saveptr)) {
if (*tok == '+') {
if (change == SET)
goto out_badmix;
change = ADD;
tok++;
} else if (*tok == '-') {
if (change == SET)
goto out_badmix;
change = REMOVE;
tok++;
} else {
if (change != SET && change != DEFAULT)
goto out_badmix;
change = SET;
}
for (i = 0; i < imax; ++i) {
if (strcmp(tok, all_output_options[i].str) == 0)
break;
}
if (i == imax && strcmp(tok, "flags") == 0) {
print_flags = change != REMOVE;
continue;
}
if (i == imax) {
fprintf(stderr, "Invalid field requested.\n");
rc = -EINVAL;
goto out;
}
if (type == -1) {
/*
 * Add the user option to all event types for
 * which it is valid.
 */
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
if (output[j].invalid_fields & all_output_options[i].field) {
pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
all_output_options[i].str, event_type(j));
} else {
if (change == REMOVE) {
output[j].fields &= ~all_output_options[i].field;
output[j].user_set_fields &= ~all_output_options[i].field;
output[j].user_unset_fields |= all_output_options[i].field;
} else {
output[j].fields |= all_output_options[i].field;
output[j].user_set_fields |= all_output_options[i].field;
output[j].user_unset_fields &= ~all_output_options[i].field;
}
output[j].user_set = true;
output[j].wildcard_set = true;
}
}
} else {
if (output[type].invalid_fields & all_output_options[i].field) {
fprintf(stderr, "\'%s\' not valid for %s events.\n",
all_output_options[i].str, event_type(type));
rc = -EINVAL;
goto out;
}
if (change == REMOVE)
output[type].fields &= ~all_output_options[i].field;
else
output[type].fields |= all_output_options[i].field;
output[type].user_set = true;
output[type].wildcard_set = true;
}
}
if (type >= 0) {
if (output[type].fields == 0) {
pr_debug("No fields requested for %s type. "
"Events will not be displayed.\n", event_type(type));
}
}
goto out;
out_badmix:
fprintf(stderr, "Cannot mix +-field with overridden fields\n");
rc = -EINVAL;
out:
free(str);
return rc;
}
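/*
 * Field-string examples accepted by parse_output_fields() above (the
 * strings are illustrative; the grammar is exactly the one just parsed):
 * "trace:time,cpu,trace" replaces the field list for tracepoint events,
 * "sw:+period" adds 'period' on top of the software-event defaults, and
 * "-comm" removes 'comm' from every event type's defaults. Mixing
 * "+field"/"-field" with a bare "field" in one string is rejected.
 */
static void __maybe_unused output_fields_examples(void)
{
parse_output_fields(NULL, "trace:time,cpu,trace", 0);
parse_output_fields(NULL, "sw:+period", 0);
parse_output_fields(NULL, "-comm", 0);
}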
#define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
while ((lang_dirent = readdir(scripts_dir)) != NULL) \
if ((lang_dirent->d_type == DT_DIR || \
(lang_dirent->d_type == DT_UNKNOWN && \
is_directory(scripts_path, lang_dirent))) && \
(strcmp(lang_dirent->d_name, ".")) && \
(strcmp(lang_dirent->d_name, "..")))
#define for_each_script(lang_path, lang_dir, script_dirent) \
while ((script_dirent = readdir(lang_dir)) != NULL) \
if (script_dirent->d_type != DT_DIR && \
(script_dirent->d_type != DT_UNKNOWN || \
!is_directory(lang_path, script_dirent)))
#define RECORD_SUFFIX "-record"
#define REPORT_SUFFIX "-report"
struct script_desc {
struct list_head node;
char *name;
char *half_liner;
char *args;
};
static LIST_HEAD(script_descs);
static struct script_desc *script_desc__new(const char *name)
{
struct script_desc *s = zalloc(sizeof(*s));
if (s != NULL && name)
s->name = strdup(name);
return s;
}
static void script_desc__delete(struct script_desc *s)
{
zfree(&s->name);
zfree(&s->half_liner);
zfree(&s->args);
free(s);
}
static void script_desc__add(struct script_desc *s)
{
list_add_tail(&s->node, &script_descs);
}
static struct script_desc *script_desc__find(const char *name)
{
struct script_desc *s;
list_for_each_entry(s, &script_descs, node)
if (strcasecmp(s->name, name) == 0)
return s;
return NULL;
}
static struct script_desc *script_desc__findnew(const char *name)
{
struct script_desc *s = script_desc__find(name);
if (s)
return s;
s = script_desc__new(name);
if (!s)
return NULL;
script_desc__add(s);
return s;
}
static const char *ends_with(const char *str, const char *suffix)
{
size_t suffix_len = strlen(suffix);
const char *p = str;
if (strlen(str) > suffix_len) {
p = str + strlen(str) - suffix_len;
if (!strncmp(p, suffix, suffix_len))
return p;
}
return NULL;
}
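/*
 * Usage sketch: ends_with() returns a pointer *into* the original string,
 * which get_script_root() below exploits by writing '\0' there to strip a
 * suffix. Minimal illustration (hypothetical helper):
 */
static int __maybe_unused strip_suffix_example(void)
{
char name[] = "syscall-counts-record";
char *p = (char *)ends_with(name, RECORD_SUFFIX);
if (!p)
return -1;
*p = '\0'; /* name is now "syscall-counts" */
return strcmp(name, "syscall-counts") == 0 ? 0 : -1;
}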
static int read_script_info(struct script_desc *desc, const char *filename)
{
char line[BUFSIZ], *p;
FILE *fp;
fp = fopen(filename, "r");
if (!fp)
return -1;
while (fgets(line, sizeof(line), fp)) {
p = skip_spaces(line);
if (strlen(p) == 0)
continue;
if (*p != '#')
continue;
p++;
if (strlen(p) && *p == '!')
continue;
p = skip_spaces(p);
if (strlen(p) && p[strlen(p) - 1] == '\n')
p[strlen(p) - 1] = '\0';
if (!strncmp(p, "description:", strlen("description:"))) {
p += strlen("description:");
desc->half_liner = strdup(skip_spaces(p));
continue;
}
if (!strncmp(p, "args:", strlen("args:"))) {
p += strlen("args:");
desc->args = strdup(skip_spaces(p));
continue;
}
}
fclose(fp);
return 0;
}
static char *get_script_root(struct dirent *script_dirent, const char *suffix)
{
char *script_root, *str;
script_root = strdup(script_dirent->d_name);
if (!script_root)
return NULL;
str = (char *)ends_with(script_root, suffix);
if (!str) {
free(script_root);
return NULL;
}
*str = '\0';
return script_root;
}
static int list_available_scripts(const struct option *opt __maybe_unused,
const char *s __maybe_unused,
int unset __maybe_unused)
{
struct dirent *script_dirent, *lang_dirent;
char *buf, *scripts_path, *script_path, *lang_path, *first_half;
DIR *scripts_dir, *lang_dir;
struct script_desc *desc;
char *script_root;
buf = malloc(3 * MAXPATHLEN + BUFSIZ);
if (!buf) {
pr_err("malloc failed\n");
exit(-1);
}
scripts_path = buf;
script_path = buf + MAXPATHLEN;
lang_path = buf + 2 * MAXPATHLEN;
first_half = buf + 3 * MAXPATHLEN;
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
scripts_dir = opendir(scripts_path);
if (!scripts_dir) {
fprintf(stdout,
"open(%s) failed.\n"
"Check \"PERF_EXEC_PATH\" env to set scripts dir.\n",
scripts_path);
free(buf);
exit(-1);
}
for_each_lang(scripts_path, scripts_dir, lang_dirent) {
scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
lang_dirent->d_name);
lang_dir = opendir(lang_path);
if (!lang_dir)
continue;
for_each_script(lang_path, lang_dir, script_dirent) {
script_root = get_script_root(script_dirent, REPORT_SUFFIX);
if (script_root) {
desc = script_desc__findnew(script_root);
scnprintf(script_path, MAXPATHLEN, "%s/%s",
lang_path, script_dirent->d_name);
read_script_info(desc, script_path);
free(script_root);
}
}
}
fprintf(stdout, "List of available trace scripts:\n");
list_for_each_entry(desc, &script_descs, node) {
sprintf(first_half, "%s %s", desc->name,
desc->args ? desc->args : "");
fprintf(stdout, " %-36s %s\n", first_half,
desc->half_liner ? desc->half_liner : "");
}
free(buf);
exit(0);
}
static int add_dlarg(const struct option *opt __maybe_unused,
const char *s, int unset __maybe_unused)
{
char *arg = strdup(s);
void *a;
if (!arg)
return -1;
a = realloc(dlargv, sizeof(dlargv[0]) * (dlargc + 1));
if (!a) {
free(arg);
return -1;
}
dlargv = a;
dlargv[dlargc++] = arg;
return 0;
}
static void free_dlarg(void)
{
while (dlargc--)
free(dlargv[dlargc]);
free(dlargv);
}
/*
 * Some scripts specify their required events in an "xxx-record" file;
 * this function checks whether the events in perf.data match those
 * mentioned in the "xxx-record" file.
 *
 * FIXME: all existing "xxx-record" files use the simple "-e event" form,
 * which is covered well now. New parsing code would be needed to cover
 * future, more complex forms such as event groups.
 */
static int check_ev_match(char *dir_name, char *scriptname,
struct perf_session *session)
{
char filename[MAXPATHLEN], evname[128];
char line[BUFSIZ], *p;
struct evsel *pos;
int match, len;
FILE *fp;
scnprintf(filename, MAXPATHLEN, "%s/bin/%s-record", dir_name, scriptname);
fp = fopen(filename, "r");
if (!fp)
return -1;
while (fgets(line, sizeof(line), fp)) {
p = skip_spaces(line);
if (*p == '#')
continue;
while (strlen(p)) {
p = strstr(p, "-e");
if (!p)
break;
p += 2;
p = skip_spaces(p);
len = strcspn(p, " \t");
if (!len)
break;
snprintf(evname, len + 1, "%s", p);
match = 0;
evlist__for_each_entry(session->evlist, pos) {
if (!strcmp(evsel__name(pos), evname)) {
match = 1;
break;
}
}
if (!match) {
fclose(fp);
return -1;
}
}
}
fclose(fp);
return 0;
}
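/*
 * A typical "xxx-record" file scanned by check_ev_match() looks like:
 *   #!/bin/bash
 *   perf record -e raw_syscalls:sys_exit "$@"
 * Sketch of the same "-e <event>" scan over a plain string (hypothetical
 * helper, same grammar as the loop above):
 */
static int __maybe_unused count_record_events(const char *cmdline)
{
const char *p = cmdline;
int n = 0;
while ((p = strstr(p, "-e")) != NULL) {
p += 2;
p = skip_spaces(p);
if (strcspn(p, " \t") == 0)
break;
n++;
}
return n;
}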
/*
 * Return -1 if no script is found, otherwise the number of scripts found.
 *
 * Currently the only user of this function is the script browser, which
 * lists all statically runnable scripts, lets the user select one,
 * executes it and shows the output in a perf browser.
 */
int find_scripts(char **scripts_array, char **scripts_path_array, int num,
int pathlen)
{
struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
struct perf_session *session;
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
};
char *temp;
int i = 0;
session = perf_session__new(&data, NULL);
if (IS_ERR(session))
return PTR_ERR(session);
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
scripts_dir = opendir(scripts_path);
if (!scripts_dir) {
perf_session__delete(session);
return -1;
}
for_each_lang(scripts_path, scripts_dir, lang_dirent) {
scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
lang_dirent->d_name);
#ifndef HAVE_LIBPERL_SUPPORT
if (strstr(lang_path, "perl"))
continue;
#endif
#ifndef HAVE_LIBPYTHON_SUPPORT
if (strstr(lang_path, "python"))
continue;
#endif
lang_dir = opendir(lang_path);
if (!lang_dir)
continue;
for_each_script(lang_path, lang_dir, script_dirent) {
/* Skip the real-time scripts: xxxtop.p[yl] */
if (strstr(script_dirent->d_name, "top."))
continue;
if (i >= num)
break;
snprintf(scripts_path_array[i], pathlen, "%s/%s",
lang_path,
script_dirent->d_name);
temp = strchr(script_dirent->d_name, '.');
snprintf(scripts_array[i],
(temp - script_dirent->d_name) + 1,
"%s", script_dirent->d_name);
if (check_ev_match(lang_path,
scripts_array[i], session))
continue;
i++;
}
closedir(lang_dir);
}
closedir(scripts_dir);
perf_session__delete(session);
return i;
}
static char *get_script_path(const char *script_root, const char *suffix)
{
struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN];
char script_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
char lang_path[MAXPATHLEN];
char *__script_root;
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
scripts_dir = opendir(scripts_path);
if (!scripts_dir)
return NULL;
for_each_lang(scripts_path, scripts_dir, lang_dirent) {
scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
lang_dirent->d_name);
lang_dir = opendir(lang_path);
if (!lang_dir)
continue;
for_each_script(lang_path, lang_dir, script_dirent) {
__script_root = get_script_root(script_dirent, suffix);
if (__script_root && !strcmp(script_root, __script_root)) {
free(__script_root);
closedir(scripts_dir);
scnprintf(script_path, MAXPATHLEN, "%s/%s",
lang_path, script_dirent->d_name);
closedir(lang_dir);
return strdup(script_path);
}
free(__script_root);
}
closedir(lang_dir);
}
closedir(scripts_dir);
return NULL;
}
static bool is_top_script(const char *script_path)
{
return ends_with(script_path, "top") != NULL;
}
static int has_required_arg(char *script_path)
{
struct script_desc *desc;
int n_args = 0;
char *p;
desc = script_desc__new(NULL);
if (read_script_info(desc, script_path))
goto out;
if (!desc->args)
goto out;
for (p = desc->args; *p; p++)
if (*p == '<')
n_args++;
out:
script_desc__delete(desc);
return n_args;
}
static int have_cmd(int argc, const char **argv)
{
char **__argv = malloc(sizeof(const char *) * argc);
if (!__argv) {
pr_err("malloc failed\n");
return -1;
}
memcpy(__argv, argv, sizeof(const char *) * argc);
argc = parse_options(argc, (const char **)__argv, record_options,
NULL, PARSE_OPT_STOP_AT_NON_OPTION);
free(__argv);
system_wide = (argc == 0);
return 0;
}
static void script__setup_sample_type(struct perf_script *script)
{
struct perf_session *session = script->session;
u64 sample_type = evlist__combined_sample_type(session->evlist);
callchain_param_setup(sample_type, perf_env__arch(session->machines.host.env));
if (script->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
pr_warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
"Please apply --call-graph lbr when recording.\n");
script->stitch_lbr = false;
}
}
static int process_stat_round_event(struct perf_session *session,
union perf_event *event)
{
struct perf_record_stat_round *round = &event->stat_round;
struct evsel *counter;
evlist__for_each_entry(session->evlist, counter) {
perf_stat_process_counter(&stat_config, counter);
process_stat(counter, round->time);
}
process_stat_interval(round->time);
return 0;
}
static int process_stat_config_event(struct perf_session *session __maybe_unused,
union perf_event *event)
{
perf_event__read_stat_config(&stat_config, &event->stat_config);
/*
* Aggregation modes are not used since post-processing scripts are
* supposed to take care of such requirements.
*/
stat_config.aggr_mode = AGGR_NONE;
return 0;
}
static int set_maps(struct perf_script *script)
{
struct evlist *evlist = script->session->evlist;
if (!script->cpus || !script->threads)
return 0;
if (WARN_ONCE(script->allocated, "stats double allocation\n"))
return -EINVAL;
perf_evlist__set_maps(&evlist->core, script->cpus, script->threads);
if (evlist__alloc_stats(&stat_config, evlist, /*alloc_raw=*/true))
return -ENOMEM;
script->allocated = true;
return 0;
}
static
int process_thread_map_event(struct perf_session *session,
union perf_event *event)
{
struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (dump_trace)
perf_event__fprintf_thread_map(event, stdout);
if (script->threads) {
pr_warning("Extra thread map event, ignoring.\n");
return 0;
}
script->threads = thread_map__new_event(&event->thread_map);
if (!script->threads)
return -ENOMEM;
return set_maps(script);
}
static
int process_cpu_map_event(struct perf_session *session,
union perf_event *event)
{
struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (dump_trace)
perf_event__fprintf_cpu_map(event, stdout);
if (script->cpus) {
pr_warning("Extra cpu map event, ignoring.\n");
return 0;
}
script->cpus = cpu_map__new_data(&event->cpu_map.data);
if (!script->cpus)
return -ENOMEM;
return set_maps(script);
}
static int process_feature_event(struct perf_session *session,
union perf_event *event)
{
if (event->feat.feat_id < HEADER_LAST_FEATURE)
return perf_event__process_feature(session, event);
return 0;
}
#ifdef HAVE_AUXTRACE_SUPPORT
static int perf_script__process_auxtrace_info(struct perf_session *session,
union perf_event *event)
{
struct perf_tool *tool = session->tool;
int ret = perf_event__process_auxtrace_info(session, event);
if (ret == 0) {
struct perf_script *script = container_of(tool, struct perf_script, tool);
ret = perf_script__setup_per_event_dump(script);
}
return ret;
}
#else
#define perf_script__process_auxtrace_info 0
#endif
static int parse_insn_trace(const struct option *opt __maybe_unused,
const char *str __maybe_unused,
int unset __maybe_unused)
{
parse_output_fields(NULL, "+insn,-event,-period", 0);
itrace_parse_synth_opts(opt, "i0ns", 0);
symbol_conf.nanosecs = true;
return 0;
}
static int parse_xed(const struct option *opt __maybe_unused,
const char *str __maybe_unused,
int unset __maybe_unused)
{
if (isatty(1))
force_pager("xed -F insn: -A -64 | less");
else
force_pager("xed -F insn: -A -64");
return 0;
}
static int parse_call_trace(const struct option *opt __maybe_unused,
const char *str __maybe_unused,
int unset __maybe_unused)
{
parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
itrace_parse_synth_opts(opt, "cewp", 0);
symbol_conf.nanosecs = true;
symbol_conf.pad_output_len_dso = 50;
return 0;
}
static int parse_callret_trace(const struct option *opt __maybe_unused,
const char *str __maybe_unused,
int unset __maybe_unused)
{
parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0);
itrace_parse_synth_opts(opt, "crewp", 0);
symbol_conf.nanosecs = true;
return 0;
}
int cmd_script(int argc, const char **argv)
{
bool show_full_info = false;
bool header = false;
bool header_only = false;
bool script_started = false;
bool unsorted_dump = false;
char *rec_script_path = NULL;
char *rep_script_path = NULL;
struct perf_session *session;
struct itrace_synth_opts itrace_synth_opts = {
.set = false,
.default_no_sample = true,
};
struct utsname uts;
char *script_path = NULL;
const char *dlfilter_file = NULL;
const char **__argv;
int i, j, err = 0;
struct perf_script script = {
.tool = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.comm = perf_event__process_comm,
.namespaces = perf_event__process_namespaces,
.cgroup = perf_event__process_cgroup,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.attr = process_attr,
.event_update = perf_event__process_event_update,
#ifdef HAVE_LIBTRACEEVENT
.tracing_data = perf_event__process_tracing_data,
#endif
.feature = process_feature_event,
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_script__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.auxtrace_error = perf_event__process_auxtrace_error,
.stat = perf_event__process_stat_event,
.stat_round = process_stat_round_event,
.stat_config = process_stat_config_event,
.thread_map = process_thread_map_event,
.cpu_map = process_cpu_map_event,
.throttle = process_throttle_event,
.unthrottle = process_throttle_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN(0, "dump-unsorted-raw-trace", &unsorted_dump,
"dump unsorted raw trace in ASCII"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('L', "Latency", &latency_format,
"show latency attributes (irqs/preemption disabled, etc)"),
OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts",
list_available_scripts),
OPT_CALLBACK_NOOPT(0, "list-dlfilters", NULL, NULL, "list available dlfilters",
list_available_dlfilters),
OPT_CALLBACK('s', "script", NULL, "name",
"script file name (lang:script name, script name, or *)",
parse_scriptname),
OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
"generate perf-script.xx script in specified language"),
OPT_STRING(0, "dlfilter", &dlfilter_file, "file", "filter .so file name"),
OPT_CALLBACK(0, "dlarg", NULL, "argument", "filter argument",
add_dlarg),
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_BOOLEAN('d', "debug-mode", &debug_mode,
"do various checks like samples ordering and lost events"),
OPT_BOOLEAN(0, "header", &header, "Show data header."),
OPT_BOOLEAN(0, "header-only", &header_only, "Show only data header."),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
"file", "kallsyms pathname"),
OPT_BOOLEAN('G', "hide-call-graph", &no_callchain,
"When printing symbols do not display call chain"),
OPT_CALLBACK(0, "symfs", NULL, "directory",
"Look for files with symbols relative to this directory",
symbol__config_symfs),
OPT_CALLBACK('F', "fields", NULL, "str",
"comma separated output fields prepend with 'type:'. "
"+field to add and -field to remove."
"Valid types: hw,sw,trace,raw,synth. "
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,dsoff"
"addr,symoff,srcline,period,iregs,uregs,brstack,"
"brstacksym,flags,data_src,weight,bpf-output,brstackinsn,"
"brstackinsnlen,brstackoff,callindent,insn,insnlen,synth,"
"phys_addr,metric,misc,srccode,ipc,tod,data_page_size,"
"code_page_size,ins_lat,machine_pid,vcpu,cgroup,retire_lat",
parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these DSOs"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
OPT_INTEGER(0, "addr-range", &symbol_conf.addr_range,
"Use with -S to list traced records within address range"),
OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, NULL,
"Decode instructions from itrace", parse_insn_trace),
OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL,
"Run xed disassembler on output", parse_xed),
OPT_CALLBACK_OPTARG(0, "call-trace", &itrace_synth_opts, NULL, NULL,
"Decode calls from itrace", parse_call_trace),
OPT_CALLBACK_OPTARG(0, "call-ret-trace", &itrace_synth_opts, NULL, NULL,
"Decode calls and returns from itrace", parse_callret_trace),
OPT_STRING(0, "graph-function", &symbol_conf.graph_function, "symbol[,symbol...]",
"Only print symbols and callees with --call-trace/--call-ret-trace"),
OPT_STRING(0, "stop-bt", &symbol_conf.bt_stop_list_str, "symbol[,symbol...]",
"Stop display of callgraph at these symbols"),
OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only display events for these comms"),
OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
"only consider symbols in these pids"),
OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
"only consider symbols in these tids"),
OPT_UINTEGER(0, "max-stack", &scripting_max_stack,
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
OPT_BOOLEAN(0, "deltatime", &deltatime, "Show time stamps relative to previous event"),
OPT_BOOLEAN('I', "show-info", &show_full_info,
"display extended information from perf.data file"),
OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
"Show the path of [kernel.kallsyms]"),
OPT_BOOLEAN('\0', "show-task-events", &script.show_task_events,
"Show the fork/comm/exit events"),
OPT_BOOLEAN('\0', "show-mmap-events", &script.show_mmap_events,
"Show the mmap events"),
OPT_BOOLEAN('\0', "show-switch-events", &script.show_switch_events,
"Show context switch events (if recorded)"),
OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events,
"Show namespace events (if recorded)"),
OPT_BOOLEAN('\0', "show-cgroup-events", &script.show_cgroup_events,
"Show cgroup events (if recorded)"),
OPT_BOOLEAN('\0', "show-lost-events", &script.show_lost_events,
"Show lost events (if recorded)"),
OPT_BOOLEAN('\0', "show-round-events", &script.show_round_events,
"Show round events (if recorded)"),
OPT_BOOLEAN('\0', "show-bpf-events", &script.show_bpf_events,
"Show bpf related events (if recorded)"),
OPT_BOOLEAN('\0', "show-text-poke-events", &script.show_text_poke_events,
"Show text poke related events (if recorded)"),
OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
"Dump trace output to files named by the monitored events"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
OPT_INTEGER(0, "max-blocks", &max_blocks,
"Maximum number of code blocks to dump with brstackinsn"),
OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs,
"Use 9 decimal places when displaying time"),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
"Instruction Tracing options\n" ITRACE_HELP,
itrace_parse_synth_opts),
OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
"Show full source file name path for source lines"),
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
"Enable symbol demangling"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_STRING(0, "time", &script.time_str, "str",
"Time span of interest (start,stop)"),
OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
"Show inline function"),
OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
"guest mount directory under which every guest os"
" instance has a subdir"),
OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
"file", "file saving guest os vmlinux"),
OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
"file", "file saving guest os /proc/kallsyms"),
OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
"file", "file saving guest os /proc/modules"),
OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
"Guest code can be found in hypervisor process"),
OPT_BOOLEAN('\0', "stitch-lbr", &script.stitch_lbr,
"Enable LBR callgraph stitching approach"),
OPTS_EVSWITCH(&script.evswitch),
OPT_END()
};
const char * const script_subcommands[] = { "record", "report", NULL };
const char *script_usage[] = {
"perf script [<options>]",
"perf script [<options>] record <script> [<record-options>] <command>",
"perf script [<options>] report <script> [script-args]",
"perf script [<options>] <script> [<record-options>] <command>",
"perf script [<options>] <top-script> [script-args]",
NULL
};
perf_set_singlethreaded();
setup_scripting();
argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (symbol_conf.guestmount ||
symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_kallsyms ||
symbol_conf.default_guest_modules ||
symbol_conf.guest_code) {
/*
* Enable guest sample processing.
*/
perf_guest = true;
}
data.path = input_name;
data.force = symbol_conf.force;
if (unsorted_dump) {
dump_trace = true;
script.tool.ordered_events = false;
}
if (symbol__validate_sym_arguments())
return -1;
if (argc > 1 && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
if (!rec_script_path)
return cmd_record(argc, argv);
}
if (argc > 1 && strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
rep_script_path = get_script_path(argv[1], REPORT_SUFFIX);
if (!rep_script_path) {
fprintf(stderr,
"Please specify a valid report script"
"(see 'perf script -l' for listing)\n");
return -1;
}
}
if (reltime && deltatime) {
fprintf(stderr,
"reltime and deltatime - the two don't get along well. "
"Please limit to --reltime or --deltatime.\n");
return -1;
}
if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
itrace_synth_opts.callchain_sz > scripting_max_stack)
scripting_max_stack = itrace_synth_opts.callchain_sz;
/* make sure PERF_EXEC_PATH is set for scripts */
set_argv_exec_path(get_argv_exec_path());
if (argc && !script_name && !rec_script_path && !rep_script_path) {
int live_pipe[2];
int rep_args;
pid_t pid;
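/*
 * "Live mode": no explicit record/report subcommand was given, so build
 * a "<script>-record | <script>-report" pipeline below. The forked child
 * execs the record shell script with "-o -", writing perf data to the
 * pipe; the parent execs the report script with "-i -", reading from it.
 */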
rec_script_path = get_script_path(argv[0], RECORD_SUFFIX);
rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
if (!rec_script_path && !rep_script_path) {
script_name = find_script(argv[0]);
if (script_name) {
argc -= 1;
argv += 1;
goto script_found;
}
usage_with_options_msg(script_usage, options,
"Couldn't find script `%s'\n\n See perf"
" script -l for available scripts.\n", argv[0]);
}
if (is_top_script(argv[0])) {
rep_args = argc - 1;
} else {
int rec_args;
rep_args = has_required_arg(rep_script_path);
rec_args = (argc - 1) - rep_args;
if (rec_args < 0) {
usage_with_options_msg(script_usage, options,
"`%s' script requires options."
"\n\n See perf script -l for available "
"scripts and options.\n", argv[0]);
}
}
if (pipe(live_pipe) < 0) {
perror("failed to create pipe");
return -1;
}
pid = fork();
if (pid < 0) {
perror("failed to fork");
return -1;
}
if (!pid) {
j = 0;
dup2(live_pipe[1], 1);
close(live_pipe[0]);
if (is_top_script(argv[0])) {
system_wide = true;
} else if (!system_wide) {
if (have_cmd(argc - rep_args, &argv[rep_args]) != 0) {
err = -1;
goto out;
}
}
__argv = malloc((argc + 6) * sizeof(const char *));
if (!__argv) {
pr_err("malloc failed\n");
err = -ENOMEM;
goto out;
}
__argv[j++] = "/bin/sh";
__argv[j++] = rec_script_path;
if (system_wide)
__argv[j++] = "-a";
__argv[j++] = "-q";
__argv[j++] = "-o";
__argv[j++] = "-";
for (i = rep_args + 1; i < argc; i++)
__argv[j++] = argv[i];
__argv[j++] = NULL;
execvp("/bin/sh", (char **)__argv);
free(__argv);
exit(-1);
}
dup2(live_pipe[0], 0);
close(live_pipe[1]);
__argv = malloc((argc + 4) * sizeof(const char *));
if (!__argv) {
pr_err("malloc failed\n");
err = -ENOMEM;
goto out;
}
j = 0;
__argv[j++] = "/bin/sh";
__argv[j++] = rep_script_path;
for (i = 1; i < rep_args + 1; i++)
__argv[j++] = argv[i];
__argv[j++] = "-i";
__argv[j++] = "-";
__argv[j++] = NULL;
execvp("/bin/sh", (char **)__argv);
free(__argv);
exit(-1);
}
script_found:
if (rec_script_path)
script_path = rec_script_path;
if (rep_script_path)
script_path = rep_script_path;
if (script_path) {
j = 0;
if (!rec_script_path)
system_wide = false;
else if (!system_wide) {
if (have_cmd(argc - 1, &argv[1]) != 0) {
err = -1;
goto out;
}
}
__argv = malloc((argc + 2) * sizeof(const char *));
if (!__argv) {
pr_err("malloc failed\n");
err = -ENOMEM;
goto out;
}
__argv[j++] = "/bin/sh";
__argv[j++] = script_path;
if (system_wide)
__argv[j++] = "-a";
for (i = 2; i < argc; i++)
__argv[j++] = argv[i];
__argv[j++] = NULL;
execvp("/bin/sh", (char **)__argv);
free(__argv);
exit(-1);
}
if (dlfilter_file) {
dlfilter = dlfilter__new(dlfilter_file, dlargc, dlargv);
if (!dlfilter)
return -1;
}
if (!script_name) {
setup_pager();
use_browser = 0;
}
session = perf_session__new(&data, &script.tool);
if (IS_ERR(session))
return PTR_ERR(session);
if (header || header_only) {
script.tool.show_feat_hdr = SHOW_FEAT_HEADER;
perf_session__fprintf_info(session, stdout, show_full_info);
if (header_only)
goto out_delete;
}
if (show_full_info)
script.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
if (symbol__init(&session->header.env) < 0)
goto out_delete;
uname(&uts);
if (data.is_pipe) { /* Assume pipe_mode indicates native_arch */
native_arch = true;
} else if (session->header.env.arch) {
if (!strcmp(uts.machine, session->header.env.arch))
native_arch = true;
else if (!strcmp(uts.machine, "x86_64") &&
!strcmp(session->header.env.arch, "i386"))
native_arch = true;
}
script.session = session;
script__setup_sample_type(&script);
if ((output[PERF_TYPE_HARDWARE].fields & PERF_OUTPUT_CALLINDENT) ||
symbol_conf.graph_function)
itrace_synth_opts.thread_stack = true;
session->itrace_synth_opts = &itrace_synth_opts;
if (cpu_list) {
err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
if (err < 0)
goto out_delete;
itrace_synth_opts.cpu_bitmap = cpu_bitmap;
}
if (!no_callchain)
symbol_conf.use_callchain = true;
else
symbol_conf.use_callchain = false;
#ifdef HAVE_LIBTRACEEVENT
if (session->tevent.pevent &&
tep_set_function_resolver(session->tevent.pevent,
machine__resolve_kernel_addr,
&session->machines.host) < 0) {
pr_err("%s: failed to set libtraceevent function resolver\n", __func__);
err = -1;
goto out_delete;
}
#endif
if (generate_script_lang) {
struct stat perf_stat;
int input;
if (output_set_by_user()) {
fprintf(stderr,
"custom fields not supported for generated scripts");
err = -EINVAL;
goto out_delete;
}
input = open(data.path, O_RDONLY); /* input_name */
if (input < 0) {
err = -errno;
perror("failed to open file");
goto out_delete;
}
err = fstat(input, &perf_stat);
if (err < 0) {
perror("failed to stat file");
goto out_delete;
}
if (!perf_stat.st_size) {
fprintf(stderr, "zero-sized file, nothing to do!\n");
goto out_delete;
}
scripting_ops = script_spec__lookup(generate_script_lang);
if (!scripting_ops) {
fprintf(stderr, "invalid language specifier");
err = -ENOENT;
goto out_delete;
}
#ifdef HAVE_LIBTRACEEVENT
err = scripting_ops->generate_script(session->tevent.pevent,
"perf-script");
#else
err = scripting_ops->generate_script(NULL, "perf-script");
#endif
goto out_delete;
}
err = dlfilter__start(dlfilter, session);
if (err)
goto out_delete;
if (script_name) {
err = scripting_ops->start_script(script_name, argc, argv, session);
if (err)
goto out_delete;
pr_debug("perf script started with script %s\n\n", script_name);
script_started = true;
}
err = perf_session__check_output_opt(session);
if (err < 0)
goto out_delete;
if (script.time_str) {
err = perf_time__parse_for_ranges_reltime(script.time_str, session,
&script.ptime_range,
&script.range_size,
&script.range_num,
reltime);
if (err < 0)
goto out_delete;
itrace_synth_opts__set_time_range(&itrace_synth_opts,
script.ptime_range,
script.range_num);
}
err = evswitch__init(&script.evswitch, session->evlist, stderr);
if (err)
goto out_delete;
if (zstd_init(&(session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
err = __cmd_script(&script);
flush_scripting();
out_delete:
if (script.ptime_range) {
itrace_synth_opts__clear_time_range(&itrace_synth_opts);
zfree(&script.ptime_range);
}
zstd_fini(&(session->zstd_data));
evlist__free_stats(session->evlist);
perf_session__delete(session);
perf_script__exit(&script);
if (script_started)
cleanup_scripting();
dlfilter__cleanup(dlfilter);
free_dlarg();
out:
return err;
}
| linux-master | tools/perf/builtin-script.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <stdio.h>
#include <string.h>
#include "builtin.h"
#include "debug.h"
#include <subcmd/parse-options.h>
#include "data-convert.h"
#include "util/util.h"
typedef int (*data_cmd_fn_t)(int argc, const char **argv);
struct data_cmd {
const char *name;
const char *summary;
data_cmd_fn_t fn;
};
static struct data_cmd data_cmds[];
#define for_each_cmd(cmd) \
for (cmd = data_cmds; cmd && cmd->name; cmd++)
static const char * const data_subcommands[] = { "convert", NULL };
static const char *data_usage[] = {
"perf data convert [<options>]",
NULL
};
const char *to_json;
const char *to_ctf;
struct perf_data_convert_opts opts = {
.force = false,
.all = false,
};
const struct option data_options[] = {
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_STRING(0, "to-json", &to_json, NULL, "Convert to JSON format"),
#ifdef HAVE_LIBBABELTRACE_SUPPORT
OPT_STRING(0, "to-ctf", &to_ctf, NULL, "Convert to CTF format"),
OPT_BOOLEAN(0, "tod", &opts.tod, "Convert time to wall clock time"),
#endif
OPT_BOOLEAN('f', "force", &opts.force, "don't complain, do it"),
OPT_BOOLEAN(0, "all", &opts.all, "Convert all events"),
OPT_END()
};
static int cmd_data_convert(int argc, const char **argv)
{
argc = parse_options(argc, argv, data_options,
data_usage, 0);
if (argc) {
usage_with_options(data_usage, data_options);
return -1;
}
if (to_json && to_ctf) {
pr_err("You cannot specify both --to-ctf and --to-json.\n");
return -1;
}
#ifdef HAVE_LIBBABELTRACE_SUPPORT
if (!to_json && !to_ctf) {
pr_err("You must specify one of --to-ctf or --to-json.\n");
return -1;
}
#else
if (!to_json) {
pr_err("You must specify --to-json.\n");
return -1;
}
#endif
if (to_json)
return bt_convert__perf2json(input_name, to_json, &opts);
if (to_ctf) {
#if defined(HAVE_LIBBABELTRACE_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
return bt_convert__perf2ctf(input_name, to_ctf, &opts);
#else
pr_err("The libbabeltrace support is not compiled in. perf should be "
"compiled with environment variables LIBBABELTRACE=1 and "
"LIBBABELTRACE_DIR=/path/to/libbabeltrace/.\n"
"Check also if libbtraceevent devel files are available.\n");
return -1;
#endif
}
return 0;
}
static struct data_cmd data_cmds[] = {
{ "convert", "converts data file between formats", cmd_data_convert },
{ .name = NULL, },
};
int cmd_data(int argc, const char **argv)
{
struct data_cmd *cmd;
const char *cmdstr;
argc = parse_options_subcommand(argc, argv, data_options, data_subcommands, data_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc) {
usage_with_options(data_usage, data_options);
return -1;
}
cmdstr = argv[0];
for_each_cmd(cmd) {
if (strcmp(cmd->name, cmdstr))
continue;
return cmd->fn(argc, argv);
}
pr_err("Unknown command: %s\n", cmdstr);
usage_with_options(data_usage, data_options);
return -1;
}
| linux-master | tools/perf/builtin-data.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-stat.c
*
* Builtin stat command: Give a precise performance counter summary
* overview about any workload, CPU or specific PID.
*
* Sample output:
$ perf stat ./hackbench 10
Time: 0.118
Performance counter stats for './hackbench 10':
1708.761321 task-clock # 11.037 CPUs utilized
41,190 context-switches # 0.024 M/sec
6,735 CPU-migrations # 0.004 M/sec
17,318 page-faults # 0.010 M/sec
5,205,202,243 cycles # 3.046 GHz
3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
2,603,501,247 instructions # 0.50 insns per cycle
# 1.48 stalled cycles per insn
484,357,498 branches # 283.455 M/sec
6,388,934 branch-misses # 1.32% of all branches
0.154822978 seconds time elapsed
*
* Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <[email protected]>
*
* Improvements and fixes by:
*
* Arjan van de Ven <[email protected]>
* Yanmin Zhang <[email protected]>
* Wu Fengguang <[email protected]>
* Mike Galbraith <[email protected]>
* Paul Mackerras <[email protected]>
* Jaswinder Singh Rajput <[email protected]>
*/
#include "builtin.h"
#include "util/cgroup.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmus.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/topdown.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/top.h"
#include "util/affinity.h"
#include "util/pfm.h"
#include "util/bpf_counter.h"
#include "util/iostat.h"
#include "util/util.h"
#include "asm/bug.h"
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <inttypes.h>
#include <locale.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <perf/evlist.h>
#include <internal/threadmap.h>
#define DEFAULT_SEPARATOR " "
#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
static void print_counters(struct timespec *ts, int argc, const char **argv);
static struct evlist *evsel_list;
static struct parse_events_option_args parse_events_option_args = {
.evlistp = &evsel_list,
};
static bool all_counters_use_bpf = true;
static struct target target = {
.uid = UINT_MAX,
};
#define METRIC_ONLY_LEN 20
static volatile sig_atomic_t child_pid = -1;
static int detailed_run = 0;
static bool transaction_run;
static bool topdown_run = false;
static bool smi_cost = false;
static bool smi_reset = false;
static int big_num_opt = -1;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static bool forever = false;
static bool force_metric_only = false;
static struct timespec ref_time;
static bool append_file;
static bool interval_count;
static const char *output_name;
static int output_fd;
static char *metrics;
struct perf_stat {
bool record;
struct perf_data data;
struct perf_session *session;
u64 bytes_written;
struct perf_tool tool;
bool maps_allocated;
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
enum aggr_mode aggr_mode;
u32 aggr_level;
};
static struct perf_stat perf_stat;
#define STAT_RECORD perf_stat.record
static volatile sig_atomic_t done = 0;
static struct perf_stat_config stat_config = {
.aggr_mode = AGGR_GLOBAL,
.aggr_level = MAX_CACHE_LVL + 1,
.scale = true,
.unit_width = 4, /* strlen("unit") */
.run_count = 1,
.metric_only_len = METRIC_ONLY_LEN,
.walltime_nsecs_stats = &walltime_nsecs_stats,
.ru_stats = &ru_stats,
.big_num = true,
.ctl_fd = -1,
.ctl_fd_ack = -1,
.iostat_run = false,
};
static bool cpus_map_matched(struct evsel *a, struct evsel *b)
{
if (!a->core.cpus && !b->core.cpus)
return true;
if (!a->core.cpus || !b->core.cpus)
return false;
if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
return false;
for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
perf_cpu_map__cpu(b->core.cpus, i).cpu)
return false;
}
return true;
}
static void evlist__check_cpu_maps(struct evlist *evlist)
{
struct evsel *evsel, *warned_leader = NULL;
evlist__for_each_entry(evlist, evsel) {
struct evsel *leader = evsel__leader(evsel);
/* Check that leader matches cpus with each member. */
if (leader == evsel)
continue;
if (cpus_map_matched(leader, evsel))
continue;
/* If there's mismatch disable the group and warn user. */
if (warned_leader != leader) {
char buf[200];
pr_warning("WARNING: grouped events cpus do not match.\n"
"Events with CPUs not matching the leader will "
"be removed from the group.\n");
evsel__group_desc(leader, buf, sizeof(buf));
pr_warning(" %s\n", buf);
warned_leader = leader;
}
if (verbose > 0) {
char buf[200];
cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
pr_warning(" %s: %s\n", leader->name, buf);
cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
pr_warning(" %s: %s\n", evsel->name, buf);
}
evsel__remove_from_group(evsel, leader);
}
}
static inline void diff_timespec(struct timespec *r, struct timespec *a,
struct timespec *b)
{
r->tv_sec = a->tv_sec - b->tv_sec;
if (a->tv_nsec < b->tv_nsec) {
r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
r->tv_sec--;
} else {
r->tv_nsec = a->tv_nsec - b->tv_nsec ;
}
}
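/*
 * Borrow example (hypothetical values): 5.2s - 3.9s. Since 0.2e9 ns is
 * less than 0.9e9 ns, a second is borrowed, giving
 * 1 s + (0.2e9 + 1e9 - 0.9e9) ns = 1.3 s.
 */
static int __maybe_unused diff_timespec_example(void)
{
struct timespec a = { .tv_sec = 5, .tv_nsec = 200000000 };
struct timespec b = { .tv_sec = 3, .tv_nsec = 900000000 };
struct timespec r;
diff_timespec(&r, &a, &b);
return r.tv_sec == 1 && r.tv_nsec == 300000000; /* evaluates to 1 */
}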
static void perf_stat__reset_stats(void)
{
evlist__reset_stats(evsel_list);
perf_stat__reset_shadow_stats();
}
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
pr_err("failed to write perf data, error: %m\n");
return -1;
}
perf_stat.bytes_written += event->header.size;
return 0;
}
static int write_stat_round_event(u64 tm, u64 type)
{
return perf_event__synthesize_stat_round(NULL, tm, type,
process_synthesized_event,
NULL);
}
#define WRITE_STAT_ROUND_EVENT(time, interval) \
write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
struct perf_counts_values *count)
{
struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);
return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
process_synthesized_event, NULL);
}
static int read_single_counter(struct evsel *counter, int cpu_map_idx,
int thread, struct timespec *rs)
{
switch(counter->tool_event) {
case PERF_TOOL_DURATION_TIME: {
u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
struct perf_counts_values *count =
perf_counts(counter->counts, cpu_map_idx, thread);
count->ena = count->run = val;
count->val = val;
return 0;
}
case PERF_TOOL_USER_TIME:
case PERF_TOOL_SYSTEM_TIME: {
u64 val;
struct perf_counts_values *count =
perf_counts(counter->counts, cpu_map_idx, thread);
if (counter->tool_event == PERF_TOOL_USER_TIME)
val = ru_stats.ru_utime_usec_stat.mean;
else
val = ru_stats.ru_stime_usec_stat.mean;
count->ena = count->run = val;
count->val = val;
return 0;
}
default:
case PERF_TOOL_NONE:
return evsel__read_counter(counter, cpu_map_idx, thread);
case PERF_TOOL_MAX:
/* This should never be reached */
return 0;
}
}
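/*
 * For the synthesized tool events above, ena and run are set equal to the
 * value so the usual scaling (val * ena / run) stays an identity. Worked
 * example (hypothetical numbers): rs = 2 s + 500000 ns gives a
 * duration_time val of 2 * 1000000000 + 500000 = 2000500000 ns.
 */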
/*
* Read out the results of a single counter:
* do not aggregate counts across CPUs in system-wide mode
*/
static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx)
{
int nthreads = perf_thread_map__nr(evsel_list->core.threads);
int thread;
if (!counter->supported)
return -ENOENT;
for (thread = 0; thread < nthreads; thread++) {
struct perf_counts_values *count;
count = perf_counts(counter->counts, cpu_map_idx, thread);
/*
* The leader's group read loads data into its group members
* (via evsel__read_counter()) and sets their count->loaded.
*/
if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
read_single_counter(counter, cpu_map_idx, thread, rs)) {
counter->counts->scaled = -1;
perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
return -1;
}
perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);
if (STAT_RECORD) {
if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
pr_err("failed to write stat event\n");
return -1;
}
}
if (verbose > 1) {
fprintf(stat_config.output,
"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
evsel__name(counter),
perf_cpu_map__cpu(evsel__cpus(counter),
cpu_map_idx).cpu,
count->val, count->ena, count->run);
}
}
return 0;
}
static int read_affinity_counters(struct timespec *rs)
{
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity saved_affinity, *affinity;
if (all_counters_use_bpf)
return 0;
if (!target__has_cpu(&target) || target__has_per_thread(&target))
affinity = NULL;
else if (affinity__setup(&saved_affinity) < 0)
return -1;
else
affinity = &saved_affinity;
evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
struct evsel *counter = evlist_cpu_itr.evsel;
if (evsel__is_bpf(counter))
continue;
if (!counter->err) {
counter->err = read_counter_cpu(counter, rs,
evlist_cpu_itr.cpu_map_idx);
}
}
if (affinity)
affinity__cleanup(&saved_affinity);
return 0;
}
static int read_bpf_map_counters(void)
{
struct evsel *counter;
int err;
evlist__for_each_entry(evsel_list, counter) {
if (!evsel__is_bpf(counter))
continue;
err = bpf_counter__read(counter);
if (err)
return err;
}
return 0;
}
static int read_counters(struct timespec *rs)
{
if (!stat_config.stop_read_counter) {
if (read_bpf_map_counters() ||
read_affinity_counters(rs))
return -1;
}
return 0;
}
static void process_counters(void)
{
struct evsel *counter;
evlist__for_each_entry(evsel_list, counter) {
if (counter->err)
pr_debug("failed to read counter %s\n", counter->name);
if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
pr_warning("failed to process counter %s\n", counter->name);
counter->err = 0;
}
perf_stat_merge_counters(&stat_config, evsel_list);
perf_stat_process_percore(&stat_config, evsel_list);
}
static void process_interval(void)
{
struct timespec ts, rs;
clock_gettime(CLOCK_MONOTONIC, &ts);
diff_timespec(&rs, &ts, &ref_time);
evlist__reset_aggr_stats(evsel_list);
if (read_counters(&rs) == 0)
process_counters();
if (STAT_RECORD) {
if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
pr_err("failed to write stat round event\n");
}
init_stats(&walltime_nsecs_stats);
update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
print_counters(&rs, 0, NULL);
}
static bool handle_interval(unsigned int interval, int *times)
{
if (interval) {
process_interval();
if (interval_count && !(--(*times)))
return true;
}
return false;
}
static int enable_counters(void)
{
struct evsel *evsel;
int err;
evlist__for_each_entry(evsel_list, evsel) {
if (!evsel__is_bpf(evsel))
continue;
err = bpf_counter__enable(evsel);
if (err)
return err;
}
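/*
 * When the counters can be enabled on exec (typically a forked workload
 * with no initial delay), the kernel enables them itself at exec time,
 * so the explicit evlist__enable() below is skipped; BPF-managed
 * counters were already enabled above.
 */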
if (!target__enable_on_exec(&target)) {
if (!all_counters_use_bpf)
evlist__enable(evsel_list);
}
return 0;
}
static void disable_counters(void)
{
struct evsel *counter;
/*
 * If we don't have a tracee (i.e. we attached to a task or CPU), the
 * counters may still be running. To get accurate group ratios, we must
 * stop the groups from counting before reading their constituent
 * counters.
 */
if (!target__none(&target)) {
evlist__for_each_entry(evsel_list, counter)
bpf_counter__disable(counter);
if (!all_counters_use_bpf)
evlist__disable(evsel_list);
}
}
static volatile sig_atomic_t workload_exec_errno;
/*
 * evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked for it by setting its
 * want_signal argument to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
void *ucontext __maybe_unused)
{
workload_exec_errno = info->si_value.sival_int;
}
static bool evsel__should_store_id(struct evsel *counter)
{
return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}
static bool is_target_alive(struct target *_target,
struct perf_thread_map *threads)
{
struct stat st;
int i;
if (!target__has_task(_target))
return true;
for (i = 0; i < threads->nr; i++) {
char path[PATH_MAX];
scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
threads->map[i].pid);
if (!stat(path, &st))
return true;
}
return false;
}
static void process_evlist(struct evlist *evlist, unsigned int interval)
{
enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
if (evlist__ctlfd_process(evlist, &cmd) > 0) {
switch (cmd) {
case EVLIST_CTL_CMD_ENABLE:
fallthrough;
case EVLIST_CTL_CMD_DISABLE:
if (interval)
process_interval();
break;
case EVLIST_CTL_CMD_SNAPSHOT:
case EVLIST_CTL_CMD_ACK:
case EVLIST_CTL_CMD_UNSUPPORTED:
case EVLIST_CTL_CMD_EVLIST:
case EVLIST_CTL_CMD_STOP:
case EVLIST_CTL_CMD_PING:
default:
break;
}
}
}
static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
int *time_to_sleep)
{
int tts = *time_to_sleep;
struct timespec time_diff;
diff_timespec(&time_diff, time_stop, time_start);
tts -= time_diff.tv_sec * MSEC_PER_SEC +
time_diff.tv_nsec / NSEC_PER_MSEC;
if (tts < 0)
tts = 0;
*time_to_sleep = tts;
}
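/*
 * compute_tts() example: with a 1000 ms interval and 250 ms spent
 * servicing fd events, the next poll timeout shrinks to 750 ms so the
 * interval prints stay aligned; it clamps at 0 if event handling overran
 * the interval.
 */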
static int dispatch_events(bool forks, int timeout, int interval, int *times)
{
int child_exited = 0, status = 0;
int time_to_sleep, sleep_time;
struct timespec time_start, time_stop;
if (interval)
sleep_time = interval;
else if (timeout)
sleep_time = timeout;
else
sleep_time = 1000;
time_to_sleep = sleep_time;
while (!done) {
if (forks)
child_exited = waitpid(child_pid, &status, WNOHANG);
else
child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;
if (child_exited)
break;
clock_gettime(CLOCK_MONOTONIC, &time_start);
if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
if (timeout || handle_interval(interval, times))
break;
time_to_sleep = sleep_time;
} else { /* fd revent */
process_evlist(evsel_list, interval);
clock_gettime(CLOCK_MONOTONIC, &time_stop);
compute_tts(&time_start, &time_stop, &time_to_sleep);
}
}
return status;
}
enum counter_recovery {
COUNTER_SKIP,
COUNTER_RETRY,
COUNTER_FATAL,
};
static enum counter_recovery stat_handle_error(struct evsel *counter)
{
char msg[BUFSIZ];
/*
* PPC returns ENXIO for HW counters until 2.6.37
* (behavior changed with commit b0a873e).
*/
if (errno == EINVAL || errno == ENOSYS ||
errno == ENOENT || errno == EOPNOTSUPP ||
errno == ENXIO) {
if (verbose > 0)
ui__warning("%s event is not supported by the kernel.\n",
evsel__name(counter));
counter->supported = false;
/*
* errored is a sticky flag meaning that one of the counter's
* per-CPU events had a problem and needs to be reexamined.
*/
counter->errored = true;
if ((evsel__leader(counter) != counter) ||
!(counter->core.leader->nr_members > 1))
return COUNTER_SKIP;
} else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
return COUNTER_RETRY;
} else if (target__has_per_thread(&target) &&
evsel_list->core.threads &&
evsel_list->core.threads->err_thread != -1) {
/*
* For the global --per-thread case, skip the current
* error thread.
*/
if (!thread_map__remove(evsel_list->core.threads,
evsel_list->core.threads->err_thread)) {
evsel_list->core.threads->err_thread = -1;
return COUNTER_RETRY;
}
} else if (counter->skippable) {
if (verbose > 0)
ui__warning("skipping event %s that kernel failed to open .\n",
evsel__name(counter));
counter->supported = false;
counter->errored = true;
return COUNTER_SKIP;
}
evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
if (child_pid != -1)
kill(child_pid, SIGTERM);
return COUNTER_FATAL;
}
static int __run_perf_stat(int argc, const char **argv, int run_idx)
{
int interval = stat_config.interval;
int times = stat_config.times;
int timeout = stat_config.timeout;
char msg[BUFSIZ];
unsigned long long t0, t1;
struct evsel *counter;
size_t l;
int status = 0;
const bool forks = (argc > 0);
bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity saved_affinity, *affinity = NULL;
int err;
bool second_pass = false;
if (forks) {
if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
perror("failed to prepare workload");
return -1;
}
child_pid = evsel_list->workload.pid;
}
if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
if (affinity__setup(&saved_affinity) < 0)
return -1;
affinity = &saved_affinity;
}
evlist__for_each_entry(evsel_list, counter) {
counter->reset_group = false;
if (bpf_counter__load(counter, &target))
return -1;
if (!(evsel__is_bperf(counter)))
all_counters_use_bpf = false;
}
evlist__reset_aggr_stats(evsel_list);
evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
counter = evlist_cpu_itr.evsel;
/*
* bperf calls evsel__open_per_cpu() in bperf__load(), so
* no need to call it again here.
*/
if (target.use_bpf)
break;
if (counter->reset_group || counter->errored)
continue;
if (evsel__is_bperf(counter))
continue;
try_again:
if (create_perf_stat_counter(counter, &stat_config, &target,
evlist_cpu_itr.cpu_map_idx) < 0) {
/*
* Weak group failed. We cannot just undo this here
* because earlier CPUs might be in group mode, and the kernel
* doesn't support mixing group and non-group reads. Defer
* it to later.
* Don't close here because we're in the wrong affinity.
*/
if ((errno == EINVAL || errno == EBADF) &&
evsel__leader(counter) != counter &&
counter->weak_group) {
evlist__reset_weak_group(evsel_list, counter, false);
assert(counter->reset_group);
second_pass = true;
continue;
}
switch (stat_handle_error(counter)) {
case COUNTER_FATAL:
return -1;
case COUNTER_RETRY:
goto try_again;
case COUNTER_SKIP:
continue;
default:
break;
}
}
counter->supported = true;
}
if (second_pass) {
/*
* Now redo all the weak groups after closing them,
* and also close errored counters.
*/
/* First close errored or weak retry */
evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
counter = evlist_cpu_itr.evsel;
if (!counter->reset_group && !counter->errored)
continue;
perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
}
/* Now reopen weak */
evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
counter = evlist_cpu_itr.evsel;
if (!counter->reset_group)
continue;
try_again_reset:
pr_debug2("reopening weak %s\n", evsel__name(counter));
if (create_perf_stat_counter(counter, &stat_config, &target,
evlist_cpu_itr.cpu_map_idx) < 0) {
switch (stat_handle_error(counter)) {
case COUNTER_FATAL:
return -1;
case COUNTER_RETRY:
goto try_again_reset;
case COUNTER_SKIP:
continue;
default:
break;
}
}
counter->supported = true;
}
}
affinity__cleanup(affinity);
evlist__for_each_entry(evsel_list, counter) {
if (!counter->supported) {
perf_evsel__free_fd(&counter->core);
continue;
}
l = strlen(counter->unit);
if (l > stat_config.unit_width)
stat_config.unit_width = l;
if (evsel__should_store_id(counter) &&
evsel__store_ids(counter, evsel_list))
return -1;
}
if (evlist__apply_filters(evsel_list, &counter)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
counter->filter, evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
return -1;
}
if (STAT_RECORD) {
int fd = perf_data__fd(&perf_stat.data);
if (is_pipe) {
err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
} else {
err = perf_session__write_header(perf_stat.session, evsel_list,
fd, false);
}
if (err < 0)
return err;
err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
process_synthesized_event, is_pipe);
if (err < 0)
return err;
}
if (target.initial_delay) {
pr_info(EVLIST_DISABLED_MSG);
} else {
err = enable_counters();
if (err)
return -1;
}
/* Exec the command, if any */
if (forks)
evlist__start_workload(evsel_list);
if (target.initial_delay > 0) {
usleep(target.initial_delay * USEC_PER_MSEC);
err = enable_counters();
if (err)
return -1;
pr_info(EVLIST_ENABLED_MSG);
}
t0 = rdclock();
clock_gettime(CLOCK_MONOTONIC, &ref_time);
if (forks) {
if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
status = dispatch_events(forks, timeout, interval, &times);
if (child_pid != -1) {
if (timeout)
kill(child_pid, SIGTERM);
wait4(child_pid, &status, 0, &stat_config.ru_data);
}
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
pr_err("Workload failed: %s\n", emsg);
return -1;
}
if (WIFSIGNALED(status))
psignal(WTERMSIG(status), argv[0]);
} else {
status = dispatch_events(forks, timeout, interval, &times);
}
disable_counters();
t1 = rdclock();
if (stat_config.walltime_run_table)
stat_config.walltime_run[run_idx] = t1 - t0;
if (interval && stat_config.summary) {
stat_config.interval = 0;
stat_config.stop_read_counter = true;
init_stats(&walltime_nsecs_stats);
update_stats(&walltime_nsecs_stats, t1 - t0);
evlist__copy_prev_raw_counts(evsel_list);
evlist__reset_prev_raw_counts(evsel_list);
evlist__reset_aggr_stats(evsel_list);
} else {
update_stats(&walltime_nsecs_stats, t1 - t0);
update_rusage_stats(&ru_stats, &stat_config.ru_data);
}
/*
* Closing a group leader splits the group, and as we only disable
* group leaders, this results in the remaining events becoming enabled. To
* avoid arbitrary skew, we must read all counters before closing any
* group leaders.
*/
if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0)
process_counters();
/*
 * For STAT_RECORD sessions we need to keep evsel_list alive,
 * because it's processed later; it will be closed after the
 * session data has been written out.
 */
if (!STAT_RECORD)
evlist__close(evsel_list);
return WEXITSTATUS(status);
}
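/*
 * One complete measurement run: the optional --pre command, an optional
 * sync(), the measurement itself, and the optional --post command.
 */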
static int run_perf_stat(int argc, const char **argv, int run_idx)
{
int ret;
if (pre_cmd) {
ret = system(pre_cmd);
if (ret)
return ret;
}
if (sync_run)
sync();
ret = __run_perf_stat(argc, argv, run_idx);
if (ret)
return ret;
if (post_cmd) {
ret = system(post_cmd);
if (ret)
return ret;
}
return ret;
}
static void print_counters(struct timespec *ts, int argc, const char **argv)
{
/* Do not print anything if we record to the pipe. */
if (STAT_RECORD && perf_stat.data.is_pipe)
return;
if (quiet)
return;
evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
}
static volatile sig_atomic_t signr = -1;
static void skip_signal(int signo)
{
if ((child_pid == -1) || stat_config.interval)
done = 1;
signr = signo;
/*
 * Render child_pid harmless so that we won't send SIGTERM to
 * a random process in case of a race condition with fast PID
 * recycling.
 */
child_pid = -1;
}
static void sig_atexit(void)
{
sigset_t set, oset;
/*
 * Avoid a race condition with the SIGCHLD handler in
 * skip_signal(), which modifies child_pid; the goal is to
 * avoid sending SIGTERM to a random process.
 */
sigemptyset(&set);
sigaddset(&set, SIGCHLD);
sigprocmask(SIG_BLOCK, &set, &oset);
if (child_pid != -1)
kill(child_pid, SIGTERM);
sigprocmask(SIG_SETMASK, &oset, NULL);
if (signr == -1)
return;
signal(signr, SIG_DFL);
kill(getpid(), signr);
}
void perf_stat__set_big_num(int set)
{
stat_config.big_num = (set != 0);
}
void perf_stat__set_no_csv_summary(int set)
{
stat_config.no_csv_summary = (set != 0);
}
static int stat__set_big_num(const struct option *opt __maybe_unused,
const char *s __maybe_unused, int unset)
{
big_num_opt = unset ? 0 : 1;
perf_stat__set_big_num(!unset);
return 0;
}
static int enable_metric_only(const struct option *opt __maybe_unused,
const char *s __maybe_unused, int unset)
{
force_metric_only = true;
stat_config.metric_only = !unset;
return 0;
}
static int append_metric_groups(const struct option *opt __maybe_unused,
const char *str,
int unset __maybe_unused)
{
if (metrics) {
char *tmp;
if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
return -ENOMEM;
free(metrics);
metrics = tmp;
} else {
metrics = strdup(str);
if (!metrics)
return -ENOMEM;
}
return 0;
}
static int parse_control_option(const struct option *opt,
const char *str,
int unset __maybe_unused)
{
struct perf_stat_config *config = opt->value;
return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
}
static int parse_stat_cgroups(const struct option *opt,
const char *str, int unset)
{
if (stat_config.cgroup_list) {
pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
return -1;
}
return parse_cgroups(opt, str, unset);
}
static int parse_cputype(const struct option *opt,
const char *str,
int unset __maybe_unused)
{
const struct perf_pmu *pmu;
struct evlist *evlist = *(struct evlist **)opt->value;
if (!list_empty(&evlist->core.entries)) {
fprintf(stderr, "Must define cputype before events/metrics\n");
return -1;
}
pmu = perf_pmus__pmu_for_pmu_filter(str);
if (!pmu) {
fprintf(stderr, "--cputype %s is not supported!\n", str);
return -1;
}
parse_events_option_args.pmu_filter = pmu->name;
return 0;
}
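/*
 * Parse the optional argument to --per-cache. For example, "--per-cache=L3"
 * aggregates counts per L3 cache instance, while a bare "--per-cache" falls
 * back to the last level cache (LLC).
 */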
static int parse_cache_level(const struct option *opt,
const char *str,
int unset __maybe_unused)
{
int level;
u32 *aggr_mode = (u32 *)opt->value;
u32 *aggr_level = (u32 *)opt->data;
/*
* If no string is specified, aggregate based on the topology of
* Last Level Cache (LLC). Since the LLC level can change from
* architecture to architecture, set the level greater than
* MAX_CACHE_LVL, which will be interpreted as the LLC.
*/
if (str == NULL) {
level = MAX_CACHE_LVL + 1;
goto out;
}
/*
* The format to specify cache level is LX or lX where X is the
* cache level.
*/
if (strlen(str) != 2 || (str[0] != 'l' && str[0] != 'L')) {
pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n",
MAX_CACHE_LVL,
MAX_CACHE_LVL);
return -EINVAL;
}
level = atoi(&str[1]);
if (level < 1) {
pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n",
MAX_CACHE_LVL,
MAX_CACHE_LVL);
return -EINVAL;
}
if (level > MAX_CACHE_LVL) {
pr_err("perf only supports max cache level of %d.\n"
"Consider increasing MAX_CACHE_LVL\n", MAX_CACHE_LVL);
return -EINVAL;
}
out:
*aggr_mode = AGGR_CACHE;
*aggr_level = level;
return 0;
}
static struct option stat_options[] = {
OPT_BOOLEAN('T', "transaction", &transaction_run,
"hardware transaction statistics"),
OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
OPT_CALLBACK(0, "filter", &evsel_list, "filter",
"event filter", parse_filter),
OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
"child tasks do not inherit counters"),
OPT_STRING('p', "pid", &target.pid, "pid",
"stat events on existing process id"),
OPT_STRING('t', "tid", &target.tid, "tid",
"stat events on existing thread id"),
#ifdef HAVE_BPF_SKEL
OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
"stat events on existing bpf program id"),
OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
"use bpf program to count events"),
OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
"path to perf_event_attr map"),
#endif
OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
"system-wide collection from all CPUs"),
OPT_BOOLEAN(0, "scale", &stat_config.scale,
"Use --no-scale to disable counter scaling for multiplexing"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &stat_config.run_count,
"repeat command and print average + stddev (max: 100, forever: 0)"),
OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
"display details about each run (only with -r option)"),
OPT_BOOLEAN('n', "null", &stat_config.null_run,
"null run - dont start any counters"),
OPT_INCR('d', "detailed", &detailed_run,
"detailed run - start a lot of events"),
OPT_BOOLEAN('S', "sync", &sync_run,
"call sync() before starting a run"),
OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
"print large numbers with thousands\' separators",
stat__set_big_num),
OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
"list of cpus to monitor in system-wide"),
OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
"disable CPU count aggregation", AGGR_NONE),
OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
"Merge identical named hybrid events"),
OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
"print counts with custom separator"),
OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
"print counts in JSON format"),
OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
"monitor event in cgroup name only", parse_stat_cgroups),
OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
"expand events for each cgroup"),
OPT_STRING('o', "output", &output_name, "file", "output file name"),
OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
OPT_INTEGER(0, "log-fd", &output_fd,
"log output to fd, instead of stderr"),
OPT_STRING(0, "pre", &pre_cmd, "command",
"command to run prior to the measured command"),
OPT_STRING(0, "post", &post_cmd, "command",
"command to run after to the measured command"),
OPT_UINTEGER('I', "interval-print", &stat_config.interval,
"print counts at regular interval in ms "
"(overhead is possible for values <= 100ms)"),
OPT_INTEGER(0, "interval-count", &stat_config.times,
"print counts for fixed number of times"),
OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
"clear screen in between new interval"),
OPT_UINTEGER(0, "timeout", &stat_config.timeout,
"stop workload and print counts after a timeout period in ms (>= 10ms)"),
OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
"aggregate counts per processor socket", AGGR_SOCKET),
OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
"aggregate counts per processor die", AGGR_DIE),
OPT_CALLBACK_OPTARG(0, "per-cache", &stat_config.aggr_mode, &stat_config.aggr_level,
"cache level", "aggregate count at this cache level (Default: LLC)",
parse_cache_level),
OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
"aggregate counts per physical processor core", AGGR_CORE),
OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
"aggregate counts per thread", AGGR_THREAD),
OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
"aggregate counts per numa node", AGGR_NODE),
OPT_INTEGER('D', "delay", &target.initial_delay,
"ms to wait before starting measurement after program start (-1: start with events disabled)"),
OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
"Only print computed metrics. No raw values", enable_metric_only),
OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
"don't group metric events, impacts multiplexing"),
OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
"don't try to share events between metrics in a group"),
OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
"don't try to share events between metrics in a group "),
OPT_BOOLEAN(0, "topdown", &topdown_run,
"measure top-down statistics"),
OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
"Set the metrics level for the top-down statistics (0: max level)"),
OPT_BOOLEAN(0, "smi-cost", &smi_cost,
"measure SMI cost"),
OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
"monitor specified metrics or metric groups (separated by ,)",
append_metric_groups),
OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
"Configure all used events to run in kernel space.",
PARSE_OPT_EXCLUSIVE),
OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
"Configure all used events to run in user space.",
PARSE_OPT_EXCLUSIVE),
OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
"Use with 'percore' event qualifier to show the event "
"counts of one hardware thread by sum up total hardware "
"threads of same physical core"),
OPT_BOOLEAN(0, "summary", &stat_config.summary,
"print summary for interval mode"),
OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
"don't print 'summary' for CSV summary output"),
OPT_BOOLEAN(0, "quiet", &quiet,
"don't print any output, messages or warnings (useful with record)"),
OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
"Only enable events on applying cpu with this type "
"for hybrid platform (e.g. core or atom)",
parse_cputype),
#ifdef HAVE_LIBPFM
OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
"libpfm4 event selector. use 'perf list' to list available events",
parse_libpfm_events_option),
#endif
OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
"Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
"\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
"\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
parse_control_option),
OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
"measure I/O performance metrics provided by arch/platform",
iostat_parse),
OPT_END()
};
/**
* Calculate the cache instance ID from the map in
* /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
* Cache instance ID is the first CPU reported in the shared_cpu_list file.
*/
static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
{
int id;
struct perf_cpu_map *cpu_map = perf_cpu_map__new(map);
/*
 * If the map contains no CPU, use the current CPU as the ID;
 * otherwise use the first online CPU of the cache domain.
 */
if (perf_cpu_map__empty(cpu_map))
id = cpu.cpu;
else
id = perf_cpu_map__cpu(cpu_map, 0).cpu;
/* Free the perf_cpu_map used to find the cache ID */
perf_cpu_map__put(cpu_map);
return id;
}
/**
* cpu__get_cache_id - Returns 0 if successful in populating the
* cache level and cache id. Cache level is read from
* /sys/devices/system/cpu/cpuX/cache/indexY/level, whereas the cache instance ID
* is the first CPU reported by
* /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
*/
static int cpu__get_cache_details(struct perf_cpu cpu, struct perf_cache *cache)
{
int ret = 0;
u32 cache_level = stat_config.aggr_level;
struct cpu_cache_level caches[MAX_CACHE_LVL];
u32 i = 0, caches_cnt = 0;
cache->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level;
cache->cache = -1;
ret = build_caches_for_cpu(cpu.cpu, caches, &caches_cnt);
if (ret) {
/*
* If caches_cnt is not 0, cpu_cache_level data
* was allocated when building the topology.
* Free the allocated data before returning.
*/
if (caches_cnt)
goto free_caches;
return ret;
}
if (!caches_cnt)
return -1;
/*
* Save the data for the highest level if no
* level was specified by the user.
*/
if (cache_level > MAX_CACHE_LVL) {
int max_level_index = 0;
for (i = 1; i < caches_cnt; ++i) {
if (caches[i].level > caches[max_level_index].level)
max_level_index = i;
}
cache->cache_lvl = caches[max_level_index].level;
cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map);
/* Reset i to 0 to free entire caches[] */
i = 0;
goto free_caches;
}
for (i = 0; i < caches_cnt; ++i) {
if (caches[i].level == cache_level) {
cache->cache_lvl = cache_level;
cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
}
cpu_cache_level__free(&caches[i]);
}
free_caches:
/*
* Free all the allocated cpu_cache_level data.
*/
while (i < caches_cnt)
cpu_cache_level__free(&caches[i++]);
return ret;
}
/**
* aggr_cpu_id__cache - Create an aggr_cpu_id with the cache instance ID,
* cache level, die and socket populated for cpu. The function signature
* is compatible with aggr_cpu_id_get_t.
*/
static struct aggr_cpu_id aggr_cpu_id__cache(struct perf_cpu cpu, void *data)
{
int ret;
struct aggr_cpu_id id;
struct perf_cache cache;
id = aggr_cpu_id__die(cpu, data);
if (aggr_cpu_id__is_empty(&id))
return id;
ret = cpu__get_cache_details(cpu, &cache);
if (ret)
return id;
id.cache_lvl = cache.cache_lvl;
id.cache = cache.cache;
return id;
}
static const char *const aggr_mode__string[] = {
[AGGR_CORE] = "core",
[AGGR_CACHE] = "cache",
[AGGR_DIE] = "die",
[AGGR_GLOBAL] = "global",
[AGGR_NODE] = "node",
[AGGR_NONE] = "none",
[AGGR_SOCKET] = "socket",
[AGGR_THREAD] = "thread",
[AGGR_UNSET] = "unset",
};
static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return aggr_cpu_id__socket(cpu, /*data=*/NULL);
}
static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return aggr_cpu_id__die(cpu, /*data=*/NULL);
}
static struct aggr_cpu_id perf_stat__get_cache_id(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return aggr_cpu_id__cache(cpu, /*data=*/NULL);
}
static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return aggr_cpu_id__core(cpu, /*data=*/NULL);
}
static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return aggr_cpu_id__node(cpu, /*data=*/NULL);
}
static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return aggr_cpu_id__global(cpu, /*data=*/NULL);
}
static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return aggr_cpu_id__cpu(cpu, /*data=*/NULL);
}
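/*
 * Compute the aggregation ID for @cpu via @get_id, caching the result in
 * config->cpus_aggr_map so that the topology lookup is done only once per
 * CPU.
 */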
static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
aggr_get_id_t get_id, struct perf_cpu cpu)
{
struct aggr_cpu_id id;
/* per-process mode - should use global aggr mode */
if (cpu.cpu == -1)
return get_id(config, cpu);
if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);
id = config->cpus_aggr_map->map[cpu.cpu];
return id;
}
static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
struct perf_cpu cpu)
{
return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
}
static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
struct perf_cpu cpu)
{
return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
}
static struct aggr_cpu_id perf_stat__get_cache_id_cached(struct perf_stat_config *config,
struct perf_cpu cpu)
{
return perf_stat__get_aggr(config, perf_stat__get_cache_id, cpu);
}
static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
struct perf_cpu cpu)
{
return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
}
static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
struct perf_cpu cpu)
{
return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
}
static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config,
struct perf_cpu cpu)
{
return perf_stat__get_aggr(config, perf_stat__get_global, cpu);
}
static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config,
struct perf_cpu cpu)
{
return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu);
}
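/*
 * Map an aggregation mode to the function that derives an aggregation ID
 * from the live CPU topology.
 */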
static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
{
switch (aggr_mode) {
case AGGR_SOCKET:
return aggr_cpu_id__socket;
case AGGR_DIE:
return aggr_cpu_id__die;
case AGGR_CACHE:
return aggr_cpu_id__cache;
case AGGR_CORE:
return aggr_cpu_id__core;
case AGGR_NODE:
return aggr_cpu_id__node;
case AGGR_NONE:
return aggr_cpu_id__cpu;
case AGGR_GLOBAL:
return aggr_cpu_id__global;
case AGGR_THREAD:
case AGGR_UNSET:
case AGGR_MAX:
default:
return NULL;
}
}
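/* As aggr_mode__get_aggr(), but returning the cached lookup variants. */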
static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
{
switch (aggr_mode) {
case AGGR_SOCKET:
return perf_stat__get_socket_cached;
case AGGR_DIE:
return perf_stat__get_die_cached;
case AGGR_CACHE:
return perf_stat__get_cache_id_cached;
case AGGR_CORE:
return perf_stat__get_core_cached;
case AGGR_NODE:
return perf_stat__get_node_cached;
case AGGR_NONE:
return perf_stat__get_cpu_cached;
case AGGR_GLOBAL:
return perf_stat__get_global_cached;
case AGGR_THREAD:
case AGGR_UNSET:
case AGGR_MAX:
default:
return NULL;
}
}
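/*
 * Build the aggregation map for the requested mode: a per-thread map for
 * AGGR_THREAD, otherwise a topology-based map plus the per-CPU cache used
 * by the perf_stat__get_*_cached() helpers.
 */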
static int perf_stat_init_aggr_mode(void)
{
int nr;
aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);
if (get_id) {
bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
get_id, /*data=*/NULL, needs_sort);
if (!stat_config.aggr_map) {
pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
return -1;
}
stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode);
}
if (stat_config.aggr_mode == AGGR_THREAD) {
nr = perf_thread_map__nr(evsel_list->core.threads);
stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
if (stat_config.aggr_map == NULL)
return -ENOMEM;
for (int s = 0; s < nr; s++) {
struct aggr_cpu_id id = aggr_cpu_id__empty();
id.thread_idx = s;
stat_config.aggr_map->map[s] = id;
}
return 0;
}
/*
* The evsel_list->cpus is the base we operate on,
* taking the highest cpu number to be the size of
* the aggregation translation cpumap.
*/
if (evsel_list->core.user_requested_cpus)
nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
else
nr = 0;
stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}
static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
{
if (map) {
WARN_ONCE(refcount_read(&map->refcnt) != 0,
"cpu_aggr_map refcnt unbalanced\n");
free(map);
}
}
static void cpu_aggr_map__put(struct cpu_aggr_map *map)
{
if (map && refcount_dec_and_test(&map->refcnt))
cpu_aggr_map__delete(map);
}
static void perf_stat__exit_aggr_mode(void)
{
cpu_aggr_map__put(stat_config.aggr_map);
cpu_aggr_map__put(stat_config.cpus_aggr_map);
stat_config.aggr_map = NULL;
stat_config.cpus_aggr_map = NULL;
}
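/*
 * The perf_env__get_*_aggr_by_cpu() helpers below mirror the live-topology
 * helpers above, but read the topology from a recorded perf_env (the
 * perf.data header), as used by 'perf stat report'.
 */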
static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
struct perf_env *env = data;
struct aggr_cpu_id id = aggr_cpu_id__empty();
if (cpu.cpu != -1)
id.socket = env->cpu[cpu.cpu].socket_id;
return id;
}
static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
struct perf_env *env = data;
struct aggr_cpu_id id = aggr_cpu_id__empty();
if (cpu.cpu != -1) {
/*
* die_id is relative to socket, so start
* with the socket ID and then add die to
* make a unique ID.
*/
id.socket = env->cpu[cpu.cpu].socket_id;
id.die = env->cpu[cpu.cpu].die_id;
}
return id;
}
static void perf_env__get_cache_id_for_cpu(struct perf_cpu cpu, struct perf_env *env,
u32 cache_level, struct aggr_cpu_id *id)
{
int i;
int caches_cnt = env->caches_cnt;
struct cpu_cache_level *caches = env->caches;
id->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level;
id->cache = -1;
if (!caches_cnt)
return;
for (i = caches_cnt - 1; i > -1; --i) {
struct perf_cpu_map *cpu_map;
int map_contains_cpu;
/*
* If the user has not specified a level, find the first level with
* the cpu in the map. Since building the map is expensive, do
* this only if levels match.
*/
if (cache_level <= MAX_CACHE_LVL && caches[i].level != cache_level)
continue;
cpu_map = perf_cpu_map__new(caches[i].map);
map_contains_cpu = perf_cpu_map__idx(cpu_map, cpu);
perf_cpu_map__put(cpu_map);
if (map_contains_cpu != -1) {
id->cache_lvl = caches[i].level;
id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
return;
}
}
}
static struct aggr_cpu_id perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu,
void *data)
{
struct perf_env *env = data;
struct aggr_cpu_id id = aggr_cpu_id__empty();
if (cpu.cpu != -1) {
u32 cache_level = (perf_stat.aggr_level) ?: stat_config.aggr_level;
id.socket = env->cpu[cpu.cpu].socket_id;
id.die = env->cpu[cpu.cpu].die_id;
perf_env__get_cache_id_for_cpu(cpu, env, cache_level, &id);
}
return id;
}
static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
struct perf_env *env = data;
struct aggr_cpu_id id = aggr_cpu_id__empty();
if (cpu.cpu != -1) {
/*
* core_id is relative to socket and die,
* we need a global id. So we set
* socket, die id and core id
*/
id.socket = env->cpu[cpu.cpu].socket_id;
id.die = env->cpu[cpu.cpu].die_id;
id.core = env->cpu[cpu.cpu].core_id;
}
return id;
}
static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
struct perf_env *env = data;
struct aggr_cpu_id id = aggr_cpu_id__empty();
if (cpu.cpu != -1) {
/*
* core_id is relative to socket and die,
* we need a global id. So we set
* socket, die id and core id
*/
id.socket = env->cpu[cpu.cpu].socket_id;
id.die = env->cpu[cpu.cpu].die_id;
id.core = env->cpu[cpu.cpu].core_id;
id.cpu = cpu;
}
return id;
}
static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
struct aggr_cpu_id id = aggr_cpu_id__empty();
id.node = perf_env__numa_node(data, cpu);
return id;
}
static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused,
void *data __maybe_unused)
{
struct aggr_cpu_id id = aggr_cpu_id__empty();
/* it always aggregates to CPU 0 */
id.cpu = (struct perf_cpu){ .cpu = 0 };
return id;
}
static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return perf_env__get_cache_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
{
switch (aggr_mode) {
case AGGR_SOCKET:
return perf_env__get_socket_aggr_by_cpu;
case AGGR_DIE:
return perf_env__get_die_aggr_by_cpu;
case AGGR_CACHE:
return perf_env__get_cache_aggr_by_cpu;
case AGGR_CORE:
return perf_env__get_core_aggr_by_cpu;
case AGGR_NODE:
return perf_env__get_node_aggr_by_cpu;
case AGGR_GLOBAL:
return perf_env__get_global_aggr_by_cpu;
case AGGR_NONE:
return perf_env__get_cpu_aggr_by_cpu;
case AGGR_THREAD:
case AGGR_UNSET:
case AGGR_MAX:
default:
return NULL;
}
}
static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
{
switch (aggr_mode) {
case AGGR_SOCKET:
return perf_stat__get_socket_file;
case AGGR_DIE:
return perf_stat__get_die_file;
case AGGR_CACHE:
return perf_stat__get_cache_file;
case AGGR_CORE:
return perf_stat__get_core_file;
case AGGR_NODE:
return perf_stat__get_node_file;
case AGGR_GLOBAL:
return perf_stat__get_global_file;
case AGGR_NONE:
return perf_stat__get_cpu_file;
case AGGR_THREAD:
case AGGR_UNSET:
case AGGR_MAX:
default:
return NULL;
}
}
static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
struct perf_env *env = &st->session->header.env;
aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
if (stat_config.aggr_mode == AGGR_THREAD) {
int nr = perf_thread_map__nr(evsel_list->core.threads);
stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
if (stat_config.aggr_map == NULL)
return -ENOMEM;
for (int s = 0; s < nr; s++) {
struct aggr_cpu_id id = aggr_cpu_id__empty();
id.thread_idx = s;
stat_config.aggr_map->map[s] = id;
}
return 0;
}
if (!get_id)
return 0;
stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
get_id, env, needs_sort);
if (!stat_config.aggr_map) {
pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
return -1;
}
stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
return 0;
}
/*
* Add default attributes, if there were no attributes specified or
* if -d/--detailed, -d -d or -d -d -d is used:
*/
static int add_default_attributes(void)
{
struct perf_event_attr default_attrs0[] = {
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
};
struct perf_event_attr frontend_attrs[] = {
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
};
struct perf_event_attr backend_attrs[] = {
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
struct perf_event_attr default_attrs1[] = {
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
};
/*
* Detailed stats (-d), covering the L1 and last level data caches:
*/
struct perf_event_attr detailed_attrs[] = {
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_LL << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_LL << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
/*
* Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
*/
struct perf_event_attr very_detailed_attrs[] = {
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1I << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1I << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_DTLB << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_DTLB << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_ITLB << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_ITLB << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
/*
* Very, very detailed stats (-d -d -d), adding prefetch events:
*/
struct perf_event_attr very_very_detailed_attrs[] = {
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D << 0 |
(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D << 0 |
(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
struct perf_event_attr default_null_attrs[] = {};
const char *pmu = parse_events_option_args.pmu_filter ?: "all";
/* Set attrs if no event is selected and !null_run: */
if (stat_config.null_run)
return 0;
if (transaction_run) {
/* Handle -T as -M transaction. Once platform specific metrics
* support has been added to the json files, all architectures
* will use this approach. To determine transaction support
* on an architecture, test for such a metric name.
*/
if (!metricgroup__has_metric(pmu, "transaction")) {
pr_err("Missing transaction metrics\n");
return -1;
}
return metricgroup__parse_groups(evsel_list, pmu, "transaction",
stat_config.metric_no_group,
stat_config.metric_no_merge,
stat_config.metric_no_threshold,
stat_config.user_requested_cpu_list,
stat_config.system_wide,
&stat_config.metric_events);
}
if (smi_cost) {
int smi;
if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
pr_err("freeze_on_smi is not supported.\n");
return -1;
}
if (!smi) {
if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
fprintf(stderr, "Failed to set freeze_on_smi.\n");
return -1;
}
smi_reset = true;
}
if (!metricgroup__has_metric(pmu, "smi")) {
pr_err("Missing smi metrics\n");
return -1;
}
if (!force_metric_only)
stat_config.metric_only = true;
return metricgroup__parse_groups(evsel_list, pmu, "smi",
stat_config.metric_no_group,
stat_config.metric_no_merge,
stat_config.metric_no_threshold,
stat_config.user_requested_cpu_list,
stat_config.system_wide,
&stat_config.metric_events);
}
if (topdown_run) {
unsigned int max_level = metricgroups__topdown_max_level();
char str[] = "TopdownL1";
if (!force_metric_only)
stat_config.metric_only = true;
if (!max_level) {
pr_err("Topdown requested but the topdown metric groups aren't present.\n"
"(See perf list the metric groups have names like TopdownL1)\n");
return -1;
}
if (stat_config.topdown_level > max_level) {
pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
return -1;
} else if (!stat_config.topdown_level)
stat_config.topdown_level = 1;
if (!stat_config.interval && !stat_config.metric_only) {
fprintf(stat_config.output,
"Topdown accuracy may decrease when measuring long periods.\n"
"Please print the result regularly, e.g. -I1000\n");
}
str[8] = stat_config.topdown_level + '0';
if (metricgroup__parse_groups(evsel_list,
pmu, str,
/*metric_no_group=*/false,
/*metric_no_merge=*/false,
/*metric_no_threshold=*/true,
stat_config.user_requested_cpu_list,
stat_config.system_wide,
&stat_config.metric_events) < 0)
return -1;
}
if (!stat_config.topdown_level)
stat_config.topdown_level = 1;
if (!evsel_list->core.nr_entries) {
/* No events so add defaults. */
if (target__has_cpu(&target))
default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
return -1;
if (perf_pmus__have_event("cpu", "stalled-cycles-frontend")) {
if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
return -1;
}
if (perf_pmus__have_event("cpu", "stalled-cycles-backend")) {
if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
return -1;
}
if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
return -1;
/*
* Add TopdownL1 metrics if they exist. To minimize
* multiplexing, don't request threshold computation.
*/
if (metricgroup__has_metric(pmu, "Default")) {
struct evlist *metric_evlist = evlist__new();
struct evsel *metric_evsel;
if (!metric_evlist)
return -1;
if (metricgroup__parse_groups(metric_evlist, pmu, "Default",
/*metric_no_group=*/false,
/*metric_no_merge=*/false,
/*metric_no_threshold=*/true,
stat_config.user_requested_cpu_list,
stat_config.system_wide,
&stat_config.metric_events) < 0)
return -1;
evlist__for_each_entry(metric_evlist, metric_evsel) {
metric_evsel->skippable = true;
metric_evsel->default_metricgroup = true;
}
evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
evlist__delete(metric_evlist);
}
/* Platform specific attrs */
if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
return -1;
}
/* Detailed events get appended to the event list: */
if (detailed_run < 1)
return 0;
/* Append detailed run extra attributes: */
if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
return -1;
if (detailed_run < 2)
return 0;
/* Append very detailed run extra attributes: */
if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
return -1;
if (detailed_run < 3)
return 0;
/* Append very, very detailed run extra attributes: */
return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
static const char * const stat_record_usage[] = {
"perf stat record [<options>]",
NULL,
};
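/* Record all header features except those that don't apply to stat data. */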
static void init_features(struct perf_session *session)
{
int feat;
for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
perf_header__set_feat(&session->header, feat);
perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
}
static int __cmd_record(int argc, const char **argv)
{
struct perf_session *session;
struct perf_data *data = &perf_stat.data;
argc = parse_options(argc, argv, stat_options, stat_record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (output_name)
data->path = output_name;
if (stat_config.run_count != 1 || forever) {
pr_err("Cannot use -r option with perf stat record.\n");
return -1;
}
session = perf_session__new(data, NULL);
if (IS_ERR(session)) {
pr_err("Perf session creation failed\n");
return PTR_ERR(session);
}
init_features(session);
session->evlist = evsel_list;
perf_stat.session = session;
perf_stat.record = true;
return argc;
}
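/*
 * Process a recorded stat-round event: update the counters and print them,
 * using the interval timestamp carried in the event when present.
 */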
static int process_stat_round_event(struct perf_session *session,
union perf_event *event)
{
struct perf_record_stat_round *stat_round = &event->stat_round;
struct timespec tsh, *ts = NULL;
const char **argv = session->header.env.cmdline_argv;
int argc = session->header.env.nr_cmdline;
process_counters();
if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
update_stats(&walltime_nsecs_stats, stat_round->time);
if (stat_config.interval && stat_round->time) {
tsh.tv_sec = stat_round->time / NSEC_PER_SEC;
tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
ts = &tsh;
}
print_counters(ts, argc, argv);
return 0;
}
static
int process_stat_config_event(struct perf_session *session,
union perf_event *event)
{
struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
perf_event__read_stat_config(&stat_config, &event->stat_config);
if (perf_cpu_map__empty(st->cpus)) {
if (st->aggr_mode != AGGR_UNSET)
pr_warning("warning: processing task data, aggregation mode not set\n");
} else if (st->aggr_mode != AGGR_UNSET) {
stat_config.aggr_mode = st->aggr_mode;
}
if (perf_stat.data.is_pipe)
perf_stat_init_aggr_mode();
else
perf_stat_init_aggr_mode_file(st);
if (stat_config.aggr_map) {
int nr_aggr = stat_config.aggr_map->nr;
if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) {
pr_err("cannot allocate aggr counts\n");
return -1;
}
}
return 0;
}
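/*
 * Once both the CPU and thread maps have arrived from the recorded session,
 * attach them to evsel_list and allocate the counter stats.
 */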
static int set_maps(struct perf_stat *st)
{
if (!st->cpus || !st->threads)
return 0;
if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
return -EINVAL;
perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true))
return -ENOMEM;
st->maps_allocated = true;
return 0;
}
static
int process_thread_map_event(struct perf_session *session,
union perf_event *event)
{
struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
if (st->threads) {
pr_warning("Extra thread map event, ignoring.\n");
return 0;
}
st->threads = thread_map__new_event(&event->thread_map);
if (!st->threads)
return -ENOMEM;
return set_maps(st);
}
static
int process_cpu_map_event(struct perf_session *session,
union perf_event *event)
{
struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
struct perf_cpu_map *cpus;
if (st->cpus) {
pr_warning("Extra cpu map event, ignoring.\n");
return 0;
}
cpus = cpu_map__new_data(&event->cpu_map.data);
if (!cpus)
return -ENOMEM;
st->cpus = cpus;
return set_maps(st);
}
static const char * const stat_report_usage[] = {
"perf stat report [<options>]",
NULL,
};
static struct perf_stat perf_stat = {
.tool = {
.attr = perf_event__process_attr,
.event_update = perf_event__process_event_update,
.thread_map = process_thread_map_event,
.cpu_map = process_cpu_map_event,
.stat_config = process_stat_config_event,
.stat = perf_event__process_stat_event,
.stat_round = process_stat_round_event,
},
.aggr_mode = AGGR_UNSET,
.aggr_level = 0,
};
static int __cmd_report(int argc, const char **argv)
{
struct perf_session *session;
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
"aggregate counts per processor socket", AGGR_SOCKET),
OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
"aggregate counts per processor die", AGGR_DIE),
OPT_CALLBACK_OPTARG(0, "per-cache", &perf_stat.aggr_mode, &perf_stat.aggr_level,
"cache level",
"aggregate count at this cache level (Default: LLC)",
parse_cache_level),
OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
"aggregate counts per physical processor core", AGGR_CORE),
OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
"aggregate counts per numa node", AGGR_NODE),
OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
"disable CPU count aggregation", AGGR_NONE),
OPT_END()
};
struct stat st;
int ret;
argc = parse_options(argc, argv, options, stat_report_usage, 0);
if (!input_name || !strlen(input_name)) {
if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
input_name = "-";
else
input_name = "perf.data";
}
perf_stat.data.path = input_name;
perf_stat.data.mode = PERF_DATA_MODE_READ;
session = perf_session__new(&perf_stat.data, &perf_stat.tool);
if (IS_ERR(session))
return PTR_ERR(session);
perf_stat.session = session;
stat_config.output = stderr;
evlist__delete(evsel_list);
evsel_list = session->evlist;
ret = perf_session__process_events(session);
if (ret)
return ret;
perf_session__delete(session);
return 0;
}
static void setup_system_wide(int forks)
{
/*
* Make system wide (-a) the default target if
* no target was specified and one of the following
* conditions is met:
*
* - there's no workload specified
* - there is workload specified but all requested
* events are system wide events
*/
if (!target__none(&target))
return;
if (!forks)
target.system_wide = true;
else {
struct evsel *counter;
evlist__for_each_entry(evsel_list, counter) {
if (!counter->core.requires_cpu &&
!evsel__name_is(counter, "duration_time")) {
return;
}
}
if (evsel_list->core.nr_entries)
target.system_wide = true;
}
}
int cmd_stat(int argc, const char **argv)
{
const char * const stat_usage[] = {
"perf stat [<options>] [<command>]",
NULL
};
int status = -EINVAL, run_idx, err;
const char *mode;
FILE *output = stderr;
unsigned int interval, timeout;
const char * const stat_subcommands[] = { "record", "report" };
char errbuf[BUFSIZ];
setlocale(LC_ALL, "");
evsel_list = evlist__new();
if (evsel_list == NULL)
return -ENOMEM;
parse_events__shrink_config_terms();
/* String-parsing callback-based options would segfault when negated */
set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
(const char **) stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (stat_config.csv_sep) {
stat_config.csv_output = true;
if (!strcmp(stat_config.csv_sep, "\\t"))
stat_config.csv_sep = "\t";
} else
stat_config.csv_sep = DEFAULT_SEPARATOR;
if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
argc = __cmd_record(argc, argv);
if (argc < 0)
return -1;
} else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
return __cmd_report(argc, argv);
interval = stat_config.interval;
timeout = stat_config.timeout;
/*
* For record command the -o is already taken care of.
*/
if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
output = NULL;
if (output_name && output_fd) {
fprintf(stderr, "cannot use both --output and --log-fd\n");
parse_options_usage(stat_usage, stat_options, "o", 1);
parse_options_usage(NULL, stat_options, "log-fd", 0);
goto out;
}
if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
fprintf(stderr, "--metric-only is not supported with --per-thread\n");
goto out;
}
if (stat_config.metric_only && stat_config.run_count > 1) {
fprintf(stderr, "--metric-only is not supported with -r\n");
goto out;
}
if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
fprintf(stderr, "--table is only supported with -r\n");
parse_options_usage(stat_usage, stat_options, "r", 1);
parse_options_usage(NULL, stat_options, "table", 0);
goto out;
}
if (output_fd < 0) {
fprintf(stderr, "argument to --log-fd must be a > 0\n");
parse_options_usage(stat_usage, stat_options, "log-fd", 0);
goto out;
}
if (!output && !quiet) {
struct timespec tm;
mode = append_file ? "a" : "w";
output = fopen(output_name, mode);
if (!output) {
perror("failed to create output file");
return -1;
}
if (!stat_config.json_output) {
clock_gettime(CLOCK_REALTIME, &tm);
fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
}
} else if (output_fd > 0) {
mode = append_file ? "a" : "w";
output = fdopen(output_fd, mode);
if (!output) {
perror("Failed opening logfd");
return -errno;
}
}
if (stat_config.interval_clear && !isatty(fileno(output))) {
fprintf(stderr, "--interval-clear does not work with output\n");
parse_options_usage(stat_usage, stat_options, "o", 1);
parse_options_usage(NULL, stat_options, "log-fd", 0);
parse_options_usage(NULL, stat_options, "interval-clear", 0);
return -1;
}
stat_config.output = output;
/*
* let the spreadsheet do the pretty-printing
*/
if (stat_config.csv_output) {
/* User explicitly passed -B? */
if (big_num_opt == 1) {
fprintf(stderr, "-B option not supported with -x\n");
parse_options_usage(stat_usage, stat_options, "B", 1);
parse_options_usage(NULL, stat_options, "x", 1);
goto out;
} else /* Nope, so disable big number formatting */
stat_config.big_num = false;
} else if (big_num_opt == 0) /* User passed --no-big-num */
stat_config.big_num = false;
err = target__validate(&target);
if (err) {
target__strerror(&target, err, errbuf, BUFSIZ);
pr_warning("%s\n", errbuf);
}
setup_system_wide(argc);
/*
* Display user/system times only for a single
* run and when there's a specified tracee.
*/
if ((stat_config.run_count == 1) && target__none(&target))
stat_config.ru_display = true;
if (stat_config.run_count < 0) {
pr_err("Run count must be a positive number\n");
parse_options_usage(stat_usage, stat_options, "r", 1);
goto out;
} else if (stat_config.run_count == 0) {
forever = true;
stat_config.run_count = 1;
}
if (stat_config.walltime_run_table) {
stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
if (!stat_config.walltime_run) {
pr_err("failed to setup -r option");
goto out;
}
}
if ((stat_config.aggr_mode == AGGR_THREAD) &&
!target__has_task(&target)) {
if (!target.system_wide || target.cpu_list) {
fprintf(stderr, "The --per-thread option is only "
"available when monitoring via -p -t -a "
"options or only --per-thread.\n");
parse_options_usage(NULL, stat_options, "p", 1);
parse_options_usage(NULL, stat_options, "t", 1);
goto out;
}
}
/*
* no_aggr and cgroup are for system-wide only;
* --per-thread is aggregated per thread, we don't mix it with CPU mode.
*/
if (((stat_config.aggr_mode != AGGR_GLOBAL &&
stat_config.aggr_mode != AGGR_THREAD) ||
(nr_cgroups || stat_config.cgroup_list)) &&
!target__has_cpu(&target)) {
fprintf(stderr, "both cgroup and no-aggregation "
"modes only available in system-wide mode\n");
parse_options_usage(stat_usage, stat_options, "G", 1);
parse_options_usage(NULL, stat_options, "A", 1);
parse_options_usage(NULL, stat_options, "a", 1);
parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
goto out;
}
if (stat_config.iostat_run) {
status = iostat_prepare(evsel_list, &stat_config);
if (status)
goto out;
if (iostat_mode == IOSTAT_LIST) {
iostat_list(evsel_list, &stat_config);
goto out;
} else if (verbose > 0)
iostat_list(evsel_list, &stat_config);
if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
target.system_wide = true;
}
if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
target.per_thread = true;
stat_config.system_wide = target.system_wide;
if (target.cpu_list) {
stat_config.user_requested_cpu_list = strdup(target.cpu_list);
if (!stat_config.user_requested_cpu_list) {
status = -ENOMEM;
goto out;
}
}
/*
* Metric parsing needs to be delayed as metrics may optimize events
* knowing the target is system-wide.
*/
if (metrics) {
const char *pmu = parse_events_option_args.pmu_filter ?: "all";
metricgroup__parse_groups(evsel_list, pmu, metrics,
stat_config.metric_no_group,
stat_config.metric_no_merge,
stat_config.metric_no_threshold,
stat_config.user_requested_cpu_list,
stat_config.system_wide,
&stat_config.metric_events);
zfree(&metrics);
}
if (add_default_attributes())
goto out;
if (stat_config.cgroup_list) {
if (nr_cgroups > 0) {
pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
parse_options_usage(stat_usage, stat_options, "G", 1);
parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
goto out;
}
if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
&stat_config.metric_events, true) < 0) {
parse_options_usage(stat_usage, stat_options,
"for-each-cgroup", 0);
goto out;
}
}
evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);
if (evlist__create_maps(evsel_list, &target) < 0) {
if (target__has_task(&target)) {
pr_err("Problems finding threads of monitor\n");
parse_options_usage(stat_usage, stat_options, "p", 1);
parse_options_usage(NULL, stat_options, "t", 1);
} else if (target__has_cpu(&target)) {
perror("failed to parse CPUs map");
parse_options_usage(stat_usage, stat_options, "C", 1);
parse_options_usage(NULL, stat_options, "a", 1);
}
goto out;
}
evlist__check_cpu_maps(evsel_list);
/*
* Initialize thread_map with comm names,
* so we can print them in the output.
*/
if (stat_config.aggr_mode == AGGR_THREAD) {
thread_map__read_comms(evsel_list->core.threads);
}
if (stat_config.aggr_mode == AGGR_NODE)
cpu__setup_cpunode_map();
if (stat_config.times && interval)
interval_count = true;
else if (stat_config.times && !interval) {
pr_err("interval-count option should be used together with "
"interval-print.\n");
parse_options_usage(stat_usage, stat_options, "interval-count", 0);
parse_options_usage(stat_usage, stat_options, "I", 1);
goto out;
}
if (timeout && timeout < 100) {
if (timeout < 10) {
pr_err("timeout must be >= 10ms.\n");
parse_options_usage(stat_usage, stat_options, "timeout", 0);
goto out;
} else
pr_warning("timeout < 100ms. "
"The overhead percentage could be high in some cases. "
"Please proceed with caution.\n");
}
if (timeout && interval) {
pr_err("timeout option is not supported with interval-print.\n");
parse_options_usage(stat_usage, stat_options, "timeout", 0);
parse_options_usage(stat_usage, stat_options, "I", 1);
goto out;
}
if (perf_stat_init_aggr_mode())
goto out;
if (evlist__alloc_stats(&stat_config, evsel_list, interval))
goto out;
/*
* Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
* and avoids older tools showing confusing messages.
*
* However for pipe sessions we need to keep it zero,
* because script's perf_evsel__check_attr is triggered
* by attr->sample_type != 0, and we can't run it on
* stat sessions.
*/
stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);
/*
* We don't want to block the signals - that would cause
* child tasks to inherit that and Ctrl-C would not work.
* What we want is for Ctrl-C to work in the exec()-ed
* task, but being ignored by perf stat itself:
*/
atexit(sig_atexit);
if (!forever)
signal(SIGINT, skip_signal);
signal(SIGCHLD, skip_signal);
signal(SIGALRM, skip_signal);
signal(SIGABRT, skip_signal);
if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
goto out;
/* Enable ignoring missing threads when -p option is defined. */
evlist__first(evsel_list)->ignore_missing_thread = target.pid;
status = 0;
for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
if (stat_config.run_count != 1 && verbose > 0)
fprintf(output, "[ perf stat: executing run #%d ... ]\n",
run_idx + 1);
if (run_idx != 0)
evlist__reset_prev_raw_counts(evsel_list);
status = run_perf_stat(argc, argv, run_idx);
if (forever && status != -1 && !interval) {
print_counters(NULL, argc, argv);
perf_stat__reset_stats();
}
}
if (!forever && status != -1 && (!interval || stat_config.summary)) {
if (stat_config.run_count > 1)
evlist__copy_res_stats(&stat_config, evsel_list);
print_counters(NULL, argc, argv);
}
evlist__finalize_ctlfd(evsel_list);
if (STAT_RECORD) {
/*
* We synthesize the kernel mmap record just so that older tools
* don't emit warnings about not being able to resolve symbols
* due to /proc/sys/kernel/kptr_restrict settings and instead provide
* a saner message about no samples being in the perf.data file.
*
* This also serves to suppress a warning about f_header.data.size == 0
* in header.c at the moment 'perf stat record' gets introduced, which
* is not really needed once we start adding the stat specific PERF_RECORD_
* records, but the need to suppress the kptr_restrict messages in older
* tools remains -acme
*/
int fd = perf_data__fd(&perf_stat.data);
err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
process_synthesized_event,
&perf_stat.session->machines.host);
if (err) {
pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
"older tools may produce warnings about this file\n.");
}
if (!interval) {
if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
pr_err("failed to write stat round event\n");
}
if (!perf_stat.data.is_pipe) {
perf_stat.session->header.data_size += perf_stat.bytes_written;
perf_session__write_header(perf_stat.session, evsel_list, fd, true);
}
evlist__close(evsel_list);
perf_session__delete(perf_stat.session);
}
perf_stat__exit_aggr_mode();
evlist__free_stats(evsel_list);
out:
if (stat_config.iostat_run)
iostat_release(evsel_list);
zfree(&stat_config.walltime_run);
zfree(&stat_config.user_requested_cpu_list);
if (smi_cost && smi_reset)
sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
evlist__delete(evsel_list);
metricgroup__rblist_exit(&stat_config.metric_events);
evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);
return status;
}
| linux-master | tools/perf/builtin-stat.c |
// SPDX-License-Identifier: GPL-2.0
#include <internal/lib.h>
#include <subcmd/parse-options.h>
#include <api/fd/array.h>
#include <api/fs/fs.h>
#include <linux/zalloc.h>
#include <linux/string.h>
#include <linux/limits.h>
#include <string.h>
#include <sys/file.h>
#include <signal.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <sys/inotify.h>
#include <libgen.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/signalfd.h>
#include <sys/wait.h>
#include <poll.h>
#include "builtin.h"
#include "perf.h"
#include "debug.h"
#include "config.h"
#include "util.h"
#define SESSION_OUTPUT "output"
#define SESSION_CONTROL "control"
#define SESSION_ACK "ack"
/*
* Session states:
*
* OK - session is up and running
* RECONFIG - session is pending for reconfiguration,
* new values are already loaded in session object
* KILL - session is pending to be killed
*
* A session object's life and state are maintained by the
* following functions:
*
* setup_server_config
* - reads the config file and sets up session objects
* with the following states:
*
* OK - no change needed
* RECONFIG - session needs to be changed
* (run variable changed)
* KILL - session needs to be killed
* (session is no longer in config file)
*
* daemon__reconfig
* - scans session objects and performs the following actions
* for states:
*
* OK - skip
* RECONFIG - session is killed and re-run with new config
* KILL - session is killed
*
* - all sessions are in the OK state when the function exits
*/
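/*
 * An illustrative config file (the base path and session name below are
 * examples only):
 *
 *   [daemon]
 *   base=/opt/perfdata
 *
 *   [session-cycles]
 *   run = -m 10M -e cycles --overwrite --switch-output -a
 */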
enum daemon_session_state {
OK,
RECONFIG,
KILL,
};
struct daemon_session {
char *base;
char *name;
char *run;
char *control;
int pid;
struct list_head list;
enum daemon_session_state state;
time_t start;
};
struct daemon {
const char *config;
char *config_real;
char *config_base;
const char *csv_sep;
const char *base_user;
char *base;
struct list_head sessions;
FILE *out;
char *perf;
int signal_fd;
time_t start;
};
static struct daemon __daemon = {
.sessions = LIST_HEAD_INIT(__daemon.sessions),
};
static const char * const daemon_usage[] = {
"perf daemon {start|signal|stop|ping} [<options>]",
"perf daemon [<options>]",
NULL
};
static volatile sig_atomic_t done;
static void sig_handler(int sig __maybe_unused)
{
done = true;
}
static struct daemon_session *daemon__add_session(struct daemon *config, char *name)
{
struct daemon_session *session = zalloc(sizeof(*session));
if (!session)
return NULL;
session->name = strdup(name);
if (!session->name) {
free(session);
return NULL;
}
session->pid = -1;
list_add_tail(&session->list, &config->sessions);
return session;
}
static struct daemon_session *daemon__find_session(struct daemon *daemon, char *name)
{
struct daemon_session *session;
list_for_each_entry(session, &daemon->sessions, list) {
if (!strcmp(session->name, name))
return session;
}
return NULL;
}
static int get_session_name(const char *var, char *session, int len)
{
const char *p = var + sizeof("session-") - 1;
while (*p != '.' && *p != 0x0 && len--)
*session++ = *p++;
*session = 0;
return *p == '.' ? 0 : -EINVAL;
}
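/*
 * Worked example (illustrative): for var = "session-cycles.run", the
 * loop above copies "cycles" into the output buffer and returns 0;
 * session_config() then matches the remaining "run" key.
 */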
static int session_config(struct daemon *daemon, const char *var, const char *value)
{
struct daemon_session *session;
char name[100];
if (get_session_name(var, name, sizeof(name) - 1))
return -EINVAL;
var = strchr(var, '.');
if (!var)
return -EINVAL;
var++;
session = daemon__find_session(daemon, name);
if (!session) {
/* New session is defined. */
session = daemon__add_session(daemon, name);
if (!session)
return -ENOMEM;
pr_debug("reconfig: found new session %s\n", name);
/* Trigger reconfig to start it. */
session->state = RECONFIG;
} else if (session->state == KILL) {
/* Current session is defined, no action needed. */
pr_debug("reconfig: found current session %s\n", name);
session->state = OK;
}
if (!strcmp(var, "run")) {
bool same = false;
if (session->run)
same = !strcmp(session->run, value);
if (!same) {
if (session->run) {
zfree(&session->run);
pr_debug("reconfig: session %s is changed\n", name);
}
session->run = strdup(value);
if (!session->run)
return -ENOMEM;
/*
* Either new or changed run value is defined,
* trigger reconfig for the session.
*/
session->state = RECONFIG;
}
}
return 0;
}
static int server_config(const char *var, const char *value, void *cb)
{
struct daemon *daemon = cb;
if (strstarts(var, "session-")) {
return session_config(daemon, var, value);
} else if (!strcmp(var, "daemon.base") && !daemon->base_user) {
if (daemon->base && strcmp(daemon->base, value)) {
pr_err("failed: can't redefine base, bailing out\n");
return -EINVAL;
}
daemon->base = strdup(value);
if (!daemon->base)
return -ENOMEM;
}
return 0;
}
static int client_config(const char *var, const char *value, void *cb)
{
struct daemon *daemon = cb;
if (!strcmp(var, "daemon.base") && !daemon->base_user) {
daemon->base = strdup(value);
if (!daemon->base)
return -ENOMEM;
}
return 0;
}
static int check_base(struct daemon *daemon)
{
struct stat st;
if (!daemon->base) {
pr_err("failed: base not defined\n");
return -EINVAL;
}
if (stat(daemon->base, &st)) {
switch (errno) {
case EACCES:
pr_err("failed: permission denied for '%s' base\n",
daemon->base);
return -EACCES;
		case ENOENT:
			pr_err("failed: base '%s' does not exist\n",
			       daemon->base);
			return -ENOENT;
default:
pr_err("failed: can't access base '%s': %s\n",
daemon->base, strerror(errno));
return -errno;
}
}
if ((st.st_mode & S_IFMT) != S_IFDIR) {
pr_err("failed: base '%s' is not directory\n",
daemon->base);
return -EINVAL;
}
return 0;
}
static int setup_client_config(struct daemon *daemon)
{
struct perf_config_set *set = perf_config_set__load_file(daemon->config_real);
int err = -ENOMEM;
if (set) {
err = perf_config_set(set, client_config, daemon);
perf_config_set__delete(set);
}
return err ?: check_base(daemon);
}
static int setup_server_config(struct daemon *daemon)
{
struct perf_config_set *set;
struct daemon_session *session;
int err = -ENOMEM;
pr_debug("reconfig: started\n");
/*
* Mark all sessions for kill, the server config
* will set following states, see explanation at
* enum daemon_session_state declaration.
*/
list_for_each_entry(session, &daemon->sessions, list)
session->state = KILL;
set = perf_config_set__load_file(daemon->config_real);
if (set) {
err = perf_config_set(set, server_config, daemon);
perf_config_set__delete(set);
}
return err ?: check_base(daemon);
}
static int daemon_session__run(struct daemon_session *session,
struct daemon *daemon)
{
char buf[PATH_MAX];
char **argv;
int argc, fd;
if (asprintf(&session->base, "%s/session-%s",
daemon->base, session->name) < 0) {
perror("failed: asprintf");
return -1;
}
if (mkdir(session->base, 0755) && errno != EEXIST) {
perror("failed: mkdir");
return -1;
}
session->start = time(NULL);
session->pid = fork();
if (session->pid < 0)
return -1;
if (session->pid > 0) {
pr_info("reconfig: ruining session [%s:%d]: %s\n",
session->name, session->pid, session->run);
return 0;
}
if (chdir(session->base)) {
perror("failed: chdir");
return -1;
}
fd = open("/dev/null", O_RDONLY);
if (fd < 0) {
perror("failed: open /dev/null");
return -1;
}
dup2(fd, 0);
close(fd);
fd = open(SESSION_OUTPUT, O_RDWR|O_CREAT|O_TRUNC, 0644);
if (fd < 0) {
perror("failed: open session output");
return -1;
}
dup2(fd, 1);
dup2(fd, 2);
close(fd);
if (mkfifo(SESSION_CONTROL, 0600) && errno != EEXIST) {
perror("failed: create control fifo");
return -1;
}
if (mkfifo(SESSION_ACK, 0600) && errno != EEXIST) {
perror("failed: create ack fifo");
return -1;
}
scnprintf(buf, sizeof(buf), "%s record --control=fifo:%s,%s %s",
daemon->perf, SESSION_CONTROL, SESSION_ACK, session->run);
argv = argv_split(buf, &argc);
if (!argv)
exit(-1);
exit(execve(daemon->perf, argv, NULL));
return -1;
}
static pid_t handle_signalfd(struct daemon *daemon)
{
struct daemon_session *session;
struct signalfd_siginfo si;
ssize_t err;
int status;
pid_t pid;
/*
* Take signal fd data as pure signal notification and check all
* the sessions state. The reason is that multiple signals can get
* coalesced in kernel and we can receive only single signal even
* if multiple SIGCHLD were generated.
*/
err = read(daemon->signal_fd, &si, sizeof(struct signalfd_siginfo));
if (err != sizeof(struct signalfd_siginfo)) {
pr_err("failed to read signal fd\n");
return -1;
}
list_for_each_entry(session, &daemon->sessions, list) {
if (session->pid == -1)
continue;
pid = waitpid(session->pid, &status, WNOHANG);
if (pid <= 0)
continue;
if (WIFEXITED(status)) {
pr_info("session '%s' exited, status=%d\n",
session->name, WEXITSTATUS(status));
} else if (WIFSIGNALED(status)) {
pr_info("session '%s' killed (signal %d)\n",
session->name, WTERMSIG(status));
} else if (WIFSTOPPED(status)) {
pr_info("session '%s' stopped (signal %d)\n",
session->name, WSTOPSIG(status));
} else {
pr_info("session '%s' Unexpected status (0x%x)\n",
session->name, status);
}
session->state = KILL;
session->pid = -1;
}
return 0;
}
static int daemon_session__wait(struct daemon_session *session, struct daemon *daemon,
int secs)
{
struct pollfd pollfd = {
.fd = daemon->signal_fd,
.events = POLLIN,
};
time_t start;
start = time(NULL);
do {
int err = poll(&pollfd, 1, 1000);
if (err > 0) {
handle_signalfd(daemon);
} else if (err < 0) {
perror("failed: poll\n");
return -1;
}
if (start + secs < time(NULL))
return -1;
} while (session->pid != -1);
return 0;
}
static bool daemon__has_alive_session(struct daemon *daemon)
{
struct daemon_session *session;
list_for_each_entry(session, &daemon->sessions, list) {
if (session->pid != -1)
return true;
}
return false;
}
static int daemon__wait(struct daemon *daemon, int secs)
{
struct pollfd pollfd = {
.fd = daemon->signal_fd,
.events = POLLIN,
};
time_t start;
start = time(NULL);
do {
int err = poll(&pollfd, 1, 1000);
if (err > 0) {
handle_signalfd(daemon);
} else if (err < 0) {
perror("failed: poll\n");
return -1;
}
if (start + secs < time(NULL))
return -1;
} while (daemon__has_alive_session(daemon));
return 0;
}
static int daemon_session__control(struct daemon_session *session,
const char *msg, bool do_ack)
{
struct pollfd pollfd = { .events = POLLIN, };
char control_path[PATH_MAX];
char ack_path[PATH_MAX];
int control, ack = -1, len;
char buf[20];
int ret = -1;
ssize_t err;
/* open the control file */
scnprintf(control_path, sizeof(control_path), "%s/%s",
session->base, SESSION_CONTROL);
	control = open(control_path, O_WRONLY|O_NONBLOCK);
	if (control < 0)
return -1;
if (do_ack) {
/* open the ack file */
scnprintf(ack_path, sizeof(ack_path), "%s/%s",
session->base, SESSION_ACK);
		ack = open(ack_path, O_RDONLY|O_NONBLOCK);
		if (ack < 0) {
close(control);
return -1;
}
}
/* write the command */
len = strlen(msg);
err = writen(control, msg, len);
if (err != len) {
pr_err("failed: write to control pipe: %d (%s)\n",
errno, control_path);
goto out;
}
if (!do_ack)
goto out;
/* wait for an ack */
pollfd.fd = ack;
if (!poll(&pollfd, 1, 2000)) {
pr_err("failed: control ack timeout\n");
goto out;
}
if (!(pollfd.revents & POLLIN)) {
pr_err("failed: did not received an ack\n");
goto out;
}
	err = read(ack, buf, sizeof(buf) - 1);
	if (err > 0) {
		buf[err] = '\0';
		ret = strcmp(buf, "ack\n");
	} else {
		perror("failed: read ack");
	}
out:
if (ack != -1)
close(ack);
close(control);
return ret;
}
static int setup_server_socket(struct daemon *daemon)
{
struct sockaddr_un addr;
char path[PATH_MAX];
int fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0) {
fprintf(stderr, "socket: %s\n", strerror(errno));
return -1;
}
if (fcntl(fd, F_SETFD, FD_CLOEXEC)) {
perror("failed: fcntl FD_CLOEXEC");
close(fd);
return -1;
}
scnprintf(path, sizeof(path), "%s/control", daemon->base);
if (strlen(path) + 1 >= sizeof(addr.sun_path)) {
pr_err("failed: control path too long '%s'\n", path);
close(fd);
return -1;
}
memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
strlcpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
unlink(path);
if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
perror("failed: bind");
close(fd);
return -1;
}
if (listen(fd, 1) == -1) {
perror("failed: listen");
close(fd);
return -1;
}
return fd;
}
enum {
CMD_LIST = 0,
CMD_SIGNAL = 1,
CMD_STOP = 2,
CMD_PING = 3,
CMD_MAX,
};
#define SESSION_MAX 64
union cmd {
int cmd;
/* CMD_LIST */
struct {
int cmd;
int verbose;
char csv_sep;
} list;
/* CMD_SIGNAL */
struct {
int cmd;
int sig;
char name[SESSION_MAX];
} signal;
/* CMD_PING */
struct {
int cmd;
char name[SESSION_MAX];
} ping;
};
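/*
 * Protocol sketch (illustrative): a client sends exactly one
 * fixed-size union cmd over the control socket and streams back the
 * server's textual reply, e.g. for 'perf daemon ping --session cycles':
 *
 *	union cmd cmd = { .ping = { .cmd = CMD_PING, .name = "cycles" } };
 *	writen(fd, &cmd, sizeof(cmd));	// see send_cmd() below
 */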
enum {
PING_OK = 0,
PING_FAIL = 1,
PING_MAX,
};
static int daemon_session__ping(struct daemon_session *session)
{
return daemon_session__control(session, "ping", true) ? PING_FAIL : PING_OK;
}
static int cmd_session_list(struct daemon *daemon, union cmd *cmd, FILE *out)
{
char csv_sep = cmd->list.csv_sep;
struct daemon_session *session;
time_t curr = time(NULL);
if (csv_sep) {
fprintf(out, "%d%c%s%c%s%c%s/%s",
/* pid daemon */
getpid(), csv_sep, "daemon",
/* base */
csv_sep, daemon->base,
/* output */
csv_sep, daemon->base, SESSION_OUTPUT);
fprintf(out, "%c%s/%s",
/* lock */
csv_sep, daemon->base, "lock");
fprintf(out, "%c%lu",
			/* daemon up time */
csv_sep, (curr - daemon->start) / 60);
fprintf(out, "\n");
} else {
fprintf(out, "[%d:daemon] base: %s\n", getpid(), daemon->base);
if (cmd->list.verbose) {
fprintf(out, " output: %s/%s\n",
daemon->base, SESSION_OUTPUT);
fprintf(out, " lock: %s/lock\n",
daemon->base);
fprintf(out, " up: %lu minutes\n",
(curr - daemon->start) / 60);
}
}
list_for_each_entry(session, &daemon->sessions, list) {
if (csv_sep) {
fprintf(out, "%d%c%s%c%s",
/* pid */
session->pid,
/* name */
csv_sep, session->name,
				/* run */
csv_sep, session->run);
fprintf(out, "%c%s%c%s/%s",
/* session dir */
csv_sep, session->base,
/* session output */
csv_sep, session->base, SESSION_OUTPUT);
fprintf(out, "%c%s/%s%c%s/%s",
/* session control */
csv_sep, session->base, SESSION_CONTROL,
/* session ack */
csv_sep, session->base, SESSION_ACK);
fprintf(out, "%c%lu",
/* session up time */
csv_sep, (curr - session->start) / 60);
fprintf(out, "\n");
} else {
fprintf(out, "[%d:%s] perf record %s\n",
session->pid, session->name, session->run);
if (!cmd->list.verbose)
continue;
fprintf(out, " base: %s\n",
session->base);
fprintf(out, " output: %s/%s\n",
session->base, SESSION_OUTPUT);
fprintf(out, " control: %s/%s\n",
session->base, SESSION_CONTROL);
fprintf(out, " ack: %s/%s\n",
session->base, SESSION_ACK);
fprintf(out, " up: %lu minutes\n",
(curr - session->start) / 60);
}
}
return 0;
}
static int daemon_session__signal(struct daemon_session *session, int sig)
{
if (session->pid < 0)
return -1;
return kill(session->pid, sig);
}
static int cmd_session_kill(struct daemon *daemon, union cmd *cmd, FILE *out)
{
struct daemon_session *session;
bool all = false;
all = !strcmp(cmd->signal.name, "all");
list_for_each_entry(session, &daemon->sessions, list) {
if (all || !strcmp(cmd->signal.name, session->name)) {
daemon_session__signal(session, cmd->signal.sig);
fprintf(out, "signal %d sent to session '%s [%d]'\n",
cmd->signal.sig, session->name, session->pid);
}
}
return 0;
}
static const char *ping_str[PING_MAX] = {
[PING_OK] = "OK",
[PING_FAIL] = "FAIL",
};
static int cmd_session_ping(struct daemon *daemon, union cmd *cmd, FILE *out)
{
struct daemon_session *session;
bool all = false, found = false;
all = !strcmp(cmd->ping.name, "all");
list_for_each_entry(session, &daemon->sessions, list) {
if (all || !strcmp(cmd->ping.name, session->name)) {
int state = daemon_session__ping(session);
fprintf(out, "%-4s %s\n", ping_str[state], session->name);
found = true;
}
}
if (!found && !all) {
fprintf(out, "%-4s %s (not found)\n",
ping_str[PING_FAIL], cmd->ping.name);
}
return 0;
}
static int handle_server_socket(struct daemon *daemon, int sock_fd)
{
int ret = -1, fd;
FILE *out = NULL;
union cmd cmd;
fd = accept(sock_fd, NULL, NULL);
if (fd < 0) {
perror("failed: accept");
return -1;
}
if (sizeof(cmd) != readn(fd, &cmd, sizeof(cmd))) {
perror("failed: read");
goto out;
}
out = fdopen(fd, "w");
if (!out) {
perror("failed: fdopen");
goto out;
}
switch (cmd.cmd) {
case CMD_LIST:
ret = cmd_session_list(daemon, &cmd, out);
break;
case CMD_SIGNAL:
ret = cmd_session_kill(daemon, &cmd, out);
break;
case CMD_STOP:
done = 1;
ret = 0;
pr_debug("perf daemon is exciting\n");
break;
case CMD_PING:
ret = cmd_session_ping(daemon, &cmd, out);
break;
default:
break;
}
fclose(out);
out:
/* If out is defined, then fd is closed via fclose. */
if (!out)
close(fd);
return ret;
}
static int setup_client_socket(struct daemon *daemon)
{
struct sockaddr_un addr;
char path[PATH_MAX];
int fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd == -1) {
perror("failed: socket");
return -1;
}
scnprintf(path, sizeof(path), "%s/control", daemon->base);
if (strlen(path) + 1 >= sizeof(addr.sun_path)) {
pr_err("failed: control path too long '%s'\n", path);
close(fd);
return -1;
}
memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
strlcpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) {
perror("failed: connect");
close(fd);
return -1;
}
return fd;
}
static void daemon_session__kill(struct daemon_session *session,
struct daemon *daemon)
{
int how = 0;
do {
switch (how) {
case 0:
daemon_session__control(session, "stop", false);
break;
case 1:
daemon_session__signal(session, SIGTERM);
break;
case 2:
daemon_session__signal(session, SIGKILL);
break;
default:
pr_err("failed to wait for session %s\n",
session->name);
return;
}
how++;
} while (daemon_session__wait(session, daemon, 10));
}
static void daemon__signal(struct daemon *daemon, int sig)
{
struct daemon_session *session;
list_for_each_entry(session, &daemon->sessions, list)
daemon_session__signal(session, sig);
}
static void daemon_session__delete(struct daemon_session *session)
{
zfree(&session->base);
zfree(&session->name);
zfree(&session->run);
free(session);
}
static void daemon_session__remove(struct daemon_session *session)
{
list_del(&session->list);
daemon_session__delete(session);
}
static void daemon__stop(struct daemon *daemon)
{
struct daemon_session *session;
list_for_each_entry(session, &daemon->sessions, list)
daemon_session__control(session, "stop", false);
}
static void daemon__kill(struct daemon *daemon)
{
int how = 0;
do {
switch (how) {
case 0:
daemon__stop(daemon);
break;
case 1:
daemon__signal(daemon, SIGTERM);
break;
case 2:
daemon__signal(daemon, SIGKILL);
break;
default:
pr_err("failed to wait for sessions\n");
return;
}
how++;
} while (daemon__wait(daemon, 10));
}
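/*
 * Escalation note: both daemon_session__kill() and daemon__kill() try
 * a graceful "stop" over the control fifo first, then SIGTERM, then
 * SIGKILL, waiting up to 10 seconds between the steps via the
 * signalfd wait loops above.
 */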
static void daemon__exit(struct daemon *daemon)
{
struct daemon_session *session, *h;
list_for_each_entry_safe(session, h, &daemon->sessions, list)
daemon_session__remove(session);
zfree(&daemon->config_real);
zfree(&daemon->config_base);
zfree(&daemon->base);
}
static int daemon__reconfig(struct daemon *daemon)
{
struct daemon_session *session, *n;
list_for_each_entry_safe(session, n, &daemon->sessions, list) {
/* No change. */
if (session->state == OK)
continue;
/* Remove session. */
if (session->state == KILL) {
if (session->pid > 0) {
daemon_session__kill(session, daemon);
pr_info("reconfig: session '%s' killed\n", session->name);
}
daemon_session__remove(session);
continue;
}
/* Reconfig session. */
if (session->pid > 0) {
daemon_session__kill(session, daemon);
pr_info("reconfig: session '%s' killed\n", session->name);
}
if (daemon_session__run(session, daemon))
return -1;
session->state = OK;
}
return 0;
}
static int setup_config_changes(struct daemon *daemon)
{
char *basen = strdup(daemon->config_real);
char *dirn = strdup(daemon->config_real);
char *base, *dir;
int fd, wd = -1;
if (!dirn || !basen)
goto out;
	fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
if (fd < 0) {
perror("failed: inotify_init");
goto out;
}
dir = dirname(dirn);
base = basename(basen);
pr_debug("config file: %s, dir: %s\n", base, dir);
wd = inotify_add_watch(fd, dir, IN_CLOSE_WRITE);
if (wd >= 0) {
daemon->config_base = strdup(base);
if (!daemon->config_base) {
close(fd);
wd = -1;
}
	} else {
		perror("failed: inotify_add_watch");
		close(fd);
	}
out:
free(basen);
free(dirn);
return wd < 0 ? -1 : fd;
}
static bool process_inotify_event(struct daemon *daemon, char *buf, ssize_t len)
{
char *p = buf;
while (p < (buf + len)) {
struct inotify_event *event = (struct inotify_event *) p;
/*
		 * We monitor the config directory; check whether our
		 * config file was changed.
*/
if ((event->mask & IN_CLOSE_WRITE) &&
!(event->mask & IN_ISDIR)) {
if (!strcmp(event->name, daemon->config_base))
return true;
}
p += sizeof(*event) + event->len;
}
return false;
}
static int handle_config_changes(struct daemon *daemon, int conf_fd,
bool *config_changed)
{
char buf[4096];
ssize_t len;
while (!(*config_changed)) {
len = read(conf_fd, buf, sizeof(buf));
if (len == -1) {
if (errno != EAGAIN) {
perror("failed: read");
return -1;
}
return 0;
}
*config_changed = process_inotify_event(daemon, buf, len);
}
return 0;
}
static int setup_config(struct daemon *daemon)
{
if (daemon->base_user) {
daemon->base = strdup(daemon->base_user);
if (!daemon->base)
return -ENOMEM;
}
if (daemon->config) {
char *real = realpath(daemon->config, NULL);
if (!real) {
perror("failed: realpath");
return -1;
}
daemon->config_real = real;
return 0;
}
if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK))
daemon->config_real = strdup(perf_etc_perfconfig());
else if (perf_config_global() && perf_home_perfconfig())
daemon->config_real = strdup(perf_home_perfconfig());
return daemon->config_real ? 0 : -1;
}
#ifndef F_TLOCK
#define F_TLOCK 2
static int lockf(int fd, int cmd, off_t len)
{
if (cmd != F_TLOCK || len != 0)
return -1;
return flock(fd, LOCK_EX | LOCK_NB);
}
#endif // F_TLOCK
/*
* Each daemon tries to create and lock BASE/lock file,
* if it's successful we are sure we're the only daemon
* running over the BASE.
*
* Once daemon is finished, file descriptor to lock file
* is closed and lock is released.
*/
static int check_lock(struct daemon *daemon)
{
char path[PATH_MAX];
char buf[20];
int fd, pid;
ssize_t len;
scnprintf(path, sizeof(path), "%s/lock", daemon->base);
fd = open(path, O_RDWR|O_CREAT|O_CLOEXEC, 0640);
if (fd < 0)
return -1;
if (lockf(fd, F_TLOCK, 0) < 0) {
filename__read_int(path, &pid);
fprintf(stderr, "failed: another perf daemon (pid %d) owns %s\n",
pid, daemon->base);
close(fd);
return -1;
}
scnprintf(buf, sizeof(buf), "%d", getpid());
len = strlen(buf);
if (write(fd, buf, len) != len) {
perror("failed: write");
close(fd);
return -1;
}
if (ftruncate(fd, len)) {
perror("failed: ftruncate");
close(fd);
return -1;
}
return 0;
}
static int go_background(struct daemon *daemon)
{
int pid, fd;
pid = fork();
if (pid < 0)
return -1;
if (pid > 0)
return 1;
if (setsid() < 0)
return -1;
if (check_lock(daemon))
return -1;
umask(0);
if (chdir(daemon->base)) {
perror("failed: chdir");
return -1;
}
fd = open("output", O_RDWR|O_CREAT|O_TRUNC, 0644);
if (fd < 0) {
perror("failed: open");
return -1;
}
if (fcntl(fd, F_SETFD, FD_CLOEXEC)) {
perror("failed: fcntl FD_CLOEXEC");
close(fd);
return -1;
}
close(0);
dup2(fd, 1);
dup2(fd, 2);
close(fd);
daemon->out = fdopen(1, "w");
if (!daemon->out) {
close(1);
close(2);
return -1;
}
setbuf(daemon->out, NULL);
return 0;
}
static int setup_signalfd(struct daemon *daemon)
{
sigset_t mask;
sigemptyset(&mask);
sigaddset(&mask, SIGCHLD);
if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1)
return -1;
daemon->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
return daemon->signal_fd;
}
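/*
 * Note: SIGCHLD is blocked before signalfd() so that child exits are
 * queued on the returned fd instead of raising a handler;
 * handle_signalfd() later reaps the sessions with waitpid(WNOHANG).
 */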
static int __cmd_start(struct daemon *daemon, struct option parent_options[],
int argc, const char **argv)
{
bool foreground = false;
struct option start_options[] = {
OPT_BOOLEAN('f', "foreground", &foreground, "stay on console"),
OPT_PARENT(parent_options),
OPT_END()
};
int sock_fd = -1, conf_fd = -1, signal_fd = -1;
int sock_pos, file_pos, signal_pos;
struct fdarray fda;
int err = 0;
argc = parse_options(argc, argv, start_options, daemon_usage, 0);
if (argc)
usage_with_options(daemon_usage, start_options);
daemon->start = time(NULL);
if (setup_config(daemon)) {
pr_err("failed: config not found\n");
return -1;
}
if (setup_server_config(daemon))
return -1;
if (foreground && check_lock(daemon))
return -1;
if (!foreground) {
err = go_background(daemon);
if (err) {
/* original process, exit normally */
if (err == 1)
err = 0;
daemon__exit(daemon);
return err;
}
}
debug_set_file(daemon->out);
debug_set_display_time(true);
pr_info("daemon started (pid %d)\n", getpid());
fdarray__init(&fda, 3);
sock_fd = setup_server_socket(daemon);
if (sock_fd < 0)
goto out;
conf_fd = setup_config_changes(daemon);
if (conf_fd < 0)
goto out;
signal_fd = setup_signalfd(daemon);
if (signal_fd < 0)
goto out;
sock_pos = fdarray__add(&fda, sock_fd, POLLIN|POLLERR|POLLHUP, 0);
if (sock_pos < 0)
goto out;
file_pos = fdarray__add(&fda, conf_fd, POLLIN|POLLERR|POLLHUP, 0);
if (file_pos < 0)
goto out;
signal_pos = fdarray__add(&fda, signal_fd, POLLIN|POLLERR|POLLHUP, 0);
if (signal_pos < 0)
goto out;
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
signal(SIGPIPE, SIG_IGN);
while (!done && !err) {
err = daemon__reconfig(daemon);
if (!err && fdarray__poll(&fda, -1)) {
bool reconfig = false;
if (fda.entries[sock_pos].revents & POLLIN)
err = handle_server_socket(daemon, sock_fd);
if (fda.entries[file_pos].revents & POLLIN)
err = handle_config_changes(daemon, conf_fd, &reconfig);
if (fda.entries[signal_pos].revents & POLLIN)
err = handle_signalfd(daemon) < 0;
if (reconfig)
err = setup_server_config(daemon);
}
}
out:
fdarray__exit(&fda);
daemon__kill(daemon);
daemon__exit(daemon);
if (sock_fd != -1)
close(sock_fd);
if (conf_fd != -1)
close(conf_fd);
if (signal_fd != -1)
close(signal_fd);
pr_info("daemon exited\n");
fclose(daemon->out);
return err;
}
static int send_cmd(struct daemon *daemon, union cmd *cmd)
{
int ret = -1, fd;
char *line = NULL;
size_t len = 0;
ssize_t nread;
FILE *in = NULL;
if (setup_client_config(daemon))
return -1;
fd = setup_client_socket(daemon);
if (fd < 0)
return -1;
if (sizeof(*cmd) != writen(fd, cmd, sizeof(*cmd))) {
perror("failed: write");
goto out;
}
in = fdopen(fd, "r");
if (!in) {
perror("failed: fdopen");
goto out;
}
while ((nread = getline(&line, &len, in)) != -1) {
if (fwrite(line, nread, 1, stdout) != 1)
goto out_fclose;
fflush(stdout);
}
ret = 0;
out_fclose:
fclose(in);
free(line);
out:
/* If in is defined, then fd is closed via fclose. */
if (!in)
close(fd);
return ret;
}
static int send_cmd_list(struct daemon *daemon)
{
union cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.list.cmd = CMD_LIST;
cmd.list.verbose = verbose;
cmd.list.csv_sep = daemon->csv_sep ? *daemon->csv_sep : 0;
return send_cmd(daemon, &cmd);
}
static int __cmd_signal(struct daemon *daemon, struct option parent_options[],
int argc, const char **argv)
{
const char *name = "all";
struct option start_options[] = {
OPT_STRING(0, "session", &name, "session",
"Sent signal to specific session"),
OPT_PARENT(parent_options),
OPT_END()
};
union cmd cmd;
argc = parse_options(argc, argv, start_options, daemon_usage, 0);
if (argc)
usage_with_options(daemon_usage, start_options);
if (setup_config(daemon)) {
pr_err("failed: config not found\n");
return -1;
}
memset(&cmd, 0, sizeof(cmd));
	cmd.signal.cmd = CMD_SIGNAL;
	cmd.signal.sig = SIGUSR2;
strncpy(cmd.signal.name, name, sizeof(cmd.signal.name) - 1);
return send_cmd(daemon, &cmd);
}
static int __cmd_stop(struct daemon *daemon, struct option parent_options[],
int argc, const char **argv)
{
struct option start_options[] = {
OPT_PARENT(parent_options),
OPT_END()
};
union cmd cmd;
argc = parse_options(argc, argv, start_options, daemon_usage, 0);
if (argc)
usage_with_options(daemon_usage, start_options);
if (setup_config(daemon)) {
pr_err("failed: config not found\n");
return -1;
}
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_STOP;
return send_cmd(daemon, &cmd);
}
static int __cmd_ping(struct daemon *daemon, struct option parent_options[],
int argc, const char **argv)
{
const char *name = "all";
struct option ping_options[] = {
OPT_STRING(0, "session", &name, "session",
"Ping to specific session"),
OPT_PARENT(parent_options),
OPT_END()
};
union cmd cmd;
argc = parse_options(argc, argv, ping_options, daemon_usage, 0);
if (argc)
usage_with_options(daemon_usage, ping_options);
if (setup_config(daemon)) {
pr_err("failed: config not found\n");
return -1;
}
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_PING;
scnprintf(cmd.ping.name, sizeof(cmd.ping.name), "%s", name);
return send_cmd(daemon, &cmd);
}
static char *alloc_perf_exe_path(void)
{
char path[PATH_MAX];
perf_exe(path, sizeof(path));
return strdup(path);
}
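/*
 * Example invocations (illustrative, matching daemon_usage):
 *
 *	perf daemon start --config ./daemon.config
 *	perf daemon ping --session cycles
 *	perf daemon stop
 *
 * A plain 'perf daemon' falls through to send_cmd_list() and prints
 * the running sessions.
 */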
int cmd_daemon(int argc, const char **argv)
{
struct option daemon_options[] = {
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_STRING(0, "config", &__daemon.config,
"config file", "config file path"),
OPT_STRING(0, "base", &__daemon.base_user,
"directory", "base directory"),
OPT_STRING_OPTARG('x', "field-separator", &__daemon.csv_sep,
"field separator", "print counts with custom separator", ","),
OPT_END()
};
int ret = -1;
__daemon.perf = alloc_perf_exe_path();
if (!__daemon.perf)
return -ENOMEM;
__daemon.out = stdout;
argc = parse_options(argc, argv, daemon_options, daemon_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (argc) {
if (!strcmp(argv[0], "start"))
ret = __cmd_start(&__daemon, daemon_options, argc, argv);
else if (!strcmp(argv[0], "signal"))
ret = __cmd_signal(&__daemon, daemon_options, argc, argv);
else if (!strcmp(argv[0], "stop"))
ret = __cmd_stop(&__daemon, daemon_options, argc, argv);
else if (!strcmp(argv[0], "ping"))
ret = __cmd_ping(&__daemon, daemon_options, argc, argv);
else
pr_err("failed: unknown command '%s'\n", argv[0]);
} else {
ret = setup_config(&__daemon);
if (ret)
pr_err("failed: config not found\n");
else
ret = send_cmd_list(&__daemon);
}
zfree(&__daemon.perf);
return ret;
}
| linux-master | tools/perf/builtin-daemon.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-kwork.c
*
* Copyright (c) 2022 Huawei Inc, Yang Jihong <[email protected]>
*/
#include "builtin.h"
#include "util/data.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/header.h"
#include "util/kwork.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/evsel_fprintf.h"
#include "util/util.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <traceevent/event-parse.h>
#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <linux/err.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
/*
* report header elements width
*/
#define PRINT_CPU_WIDTH 4
#define PRINT_COUNT_WIDTH 9
#define PRINT_RUNTIME_WIDTH 10
#define PRINT_LATENCY_WIDTH 10
#define PRINT_TIMESTAMP_WIDTH 17
#define PRINT_KWORK_NAME_WIDTH 30
#define RPINT_DECIMAL_WIDTH 3
#define PRINT_BRACKETPAIR_WIDTH 2
#define PRINT_TIME_UNIT_SEC_WIDTH 2
#define PRINT_TIME_UNIT_MESC_WIDTH 3
#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
struct sort_dimension {
const char *name;
int (*cmp)(struct kwork_work *l, struct kwork_work *r);
struct list_head list;
};
static int id_cmp(struct kwork_work *l, struct kwork_work *r)
{
if (l->cpu > r->cpu)
return 1;
if (l->cpu < r->cpu)
return -1;
if (l->id > r->id)
return 1;
if (l->id < r->id)
return -1;
return 0;
}
static int count_cmp(struct kwork_work *l, struct kwork_work *r)
{
if (l->nr_atoms > r->nr_atoms)
return 1;
if (l->nr_atoms < r->nr_atoms)
return -1;
return 0;
}
static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
{
if (l->total_runtime > r->total_runtime)
return 1;
if (l->total_runtime < r->total_runtime)
return -1;
return 0;
}
static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
{
if (l->max_runtime > r->max_runtime)
return 1;
if (l->max_runtime < r->max_runtime)
return -1;
return 0;
}
static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
{
u64 avgl, avgr;
if (!r->nr_atoms)
return 1;
if (!l->nr_atoms)
return -1;
avgl = l->total_latency / l->nr_atoms;
avgr = r->total_latency / r->nr_atoms;
if (avgl > avgr)
return 1;
if (avgl < avgr)
return -1;
return 0;
}
static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
{
if (l->max_latency > r->max_latency)
return 1;
if (l->max_latency < r->max_latency)
return -1;
return 0;
}
static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
const char *tok, struct list_head *list)
{
size_t i;
static struct sort_dimension max_sort_dimension = {
.name = "max",
.cmp = max_runtime_cmp,
};
static struct sort_dimension id_sort_dimension = {
.name = "id",
.cmp = id_cmp,
};
static struct sort_dimension runtime_sort_dimension = {
.name = "runtime",
.cmp = runtime_cmp,
};
static struct sort_dimension count_sort_dimension = {
.name = "count",
.cmp = count_cmp,
};
static struct sort_dimension avg_sort_dimension = {
.name = "avg",
.cmp = avg_latency_cmp,
};
struct sort_dimension *available_sorts[] = {
&id_sort_dimension,
&max_sort_dimension,
&count_sort_dimension,
&runtime_sort_dimension,
&avg_sort_dimension,
};
if (kwork->report == KWORK_REPORT_LATENCY)
max_sort_dimension.cmp = max_latency_cmp;
for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
if (!strcmp(available_sorts[i]->name, tok)) {
list_add_tail(&available_sorts[i]->list, list);
return 0;
}
}
return -1;
}
static void setup_sorting(struct perf_kwork *kwork,
const struct option *options,
const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(kwork->sort_order);
	if (str == NULL) {
		pr_err("Failed to dup sort order\n");
		return;
	}
	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
usage_with_options_msg(usage_msg, options,
"Unknown --sort key: `%s'", tok);
}
pr_debug("Sort order: %s\n", kwork->sort_order);
free(str);
}
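/*
 * Worked example (illustrative): with sort_order = "runtime, max",
 * setup_sorting() queues runtime_sort_dimension and then
 * max_sort_dimension, so work_cmp() orders works by total runtime
 * first and breaks ties on max runtime.
 */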
static struct kwork_atom *atom_new(struct perf_kwork *kwork,
struct perf_sample *sample)
{
unsigned long i;
struct kwork_atom_page *page;
struct kwork_atom *atom = NULL;
list_for_each_entry(page, &kwork->atom_page_list, list) {
if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
BUG_ON(i >= NR_ATOM_PER_PAGE);
atom = &page->atoms[i];
goto found_atom;
}
}
/*
* new page
*/
page = zalloc(sizeof(*page));
if (page == NULL) {
pr_err("Failed to zalloc kwork atom page\n");
return NULL;
}
i = 0;
atom = &page->atoms[0];
list_add_tail(&page->list, &kwork->atom_page_list);
found_atom:
__set_bit(i, page->bitmap);
atom->time = sample->time;
atom->prev = NULL;
atom->page_addr = page;
atom->bit_inpage = i;
return atom;
}
static void atom_free(struct kwork_atom *atom)
{
if (atom->prev != NULL)
atom_free(atom->prev);
__clear_bit(atom->bit_inpage,
((struct kwork_atom_page *)atom->page_addr)->bitmap);
}
static void atom_del(struct kwork_atom *atom)
{
list_del(&atom->list);
atom_free(atom);
}
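/*
 * Allocator note: atoms live in kwork_atom_page slabs of
 * NR_ATOM_PER_PAGE entries; atom_new() hands out the first zero bit
 * of a page bitmap and atom_free() only clears that bit, so an atom
 * must never be passed to free() directly.
 */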
static int work_cmp(struct list_head *list,
struct kwork_work *l, struct kwork_work *r)
{
int ret = 0;
struct sort_dimension *sort;
BUG_ON(list_empty(list));
list_for_each_entry(sort, list, list) {
ret = sort->cmp(l, r);
if (ret)
return ret;
}
return ret;
}
static struct kwork_work *work_search(struct rb_root_cached *root,
struct kwork_work *key,
struct list_head *sort_list)
{
int cmp;
struct kwork_work *work;
struct rb_node *node = root->rb_root.rb_node;
while (node) {
work = container_of(node, struct kwork_work, node);
cmp = work_cmp(sort_list, key, work);
if (cmp > 0)
node = node->rb_left;
else if (cmp < 0)
node = node->rb_right;
else {
if (work->name == NULL)
work->name = key->name;
return work;
}
}
return NULL;
}
static void work_insert(struct rb_root_cached *root,
struct kwork_work *key, struct list_head *sort_list)
{
int cmp;
bool leftmost = true;
struct kwork_work *cur;
struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
while (*new) {
cur = container_of(*new, struct kwork_work, node);
parent = *new;
cmp = work_cmp(sort_list, key, cur);
if (cmp > 0)
new = &((*new)->rb_left);
else {
new = &((*new)->rb_right);
leftmost = false;
}
}
rb_link_node(&key->node, parent, new);
rb_insert_color_cached(&key->node, root, leftmost);
}
static struct kwork_work *work_new(struct kwork_work *key)
{
int i;
struct kwork_work *work = zalloc(sizeof(*work));
if (work == NULL) {
pr_err("Failed to zalloc kwork work\n");
return NULL;
}
for (i = 0; i < KWORK_TRACE_MAX; i++)
INIT_LIST_HEAD(&work->atom_list[i]);
work->id = key->id;
work->cpu = key->cpu;
work->name = key->name;
work->class = key->class;
return work;
}
static struct kwork_work *work_findnew(struct rb_root_cached *root,
struct kwork_work *key,
struct list_head *sort_list)
{
struct kwork_work *work = work_search(root, key, sort_list);
if (work != NULL)
return work;
work = work_new(key);
if (work)
work_insert(root, work, sort_list);
return work;
}
static void profile_update_timespan(struct perf_kwork *kwork,
struct perf_sample *sample)
{
if (!kwork->summary)
return;
if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
kwork->timestart = sample->time;
if (kwork->timeend < sample->time)
kwork->timeend = sample->time;
}
static bool profile_event_match(struct perf_kwork *kwork,
struct kwork_work *work,
struct perf_sample *sample)
{
int cpu = work->cpu;
u64 time = sample->time;
struct perf_time_interval *ptime = &kwork->ptime;
if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
return false;
if (((ptime->start != 0) && (ptime->start > time)) ||
((ptime->end != 0) && (ptime->end < time)))
return false;
if ((kwork->profile_name != NULL) &&
(work->name != NULL) &&
(strcmp(work->name, kwork->profile_name) != 0))
return false;
profile_update_timespan(kwork, sample);
return true;
}
static int work_push_atom(struct perf_kwork *kwork,
struct kwork_class *class,
enum kwork_trace_type src_type,
enum kwork_trace_type dst_type,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine,
struct kwork_work **ret_work)
{
struct kwork_atom *atom, *dst_atom;
struct kwork_work *work, key;
BUG_ON(class->work_init == NULL);
class->work_init(class, &key, evsel, sample, machine);
atom = atom_new(kwork, sample);
if (atom == NULL)
return -1;
work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
	if (work == NULL) {
		atom_free(atom);
		return -1;
	}
	if (!profile_event_match(kwork, work, sample)) {
		atom_free(atom);
		return 0;
	}
if (dst_type < KWORK_TRACE_MAX) {
dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
struct kwork_atom, list);
if (dst_atom != NULL) {
atom->prev = dst_atom;
list_del(&dst_atom->list);
}
}
if (ret_work != NULL)
*ret_work = work;
list_add_tail(&atom->list, &work->atom_list[src_type]);
return 0;
}
static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
struct kwork_class *class,
enum kwork_trace_type src_type,
enum kwork_trace_type dst_type,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine,
struct kwork_work **ret_work)
{
struct kwork_atom *atom, *src_atom;
struct kwork_work *work, key;
BUG_ON(class->work_init == NULL);
class->work_init(class, &key, evsel, sample, machine);
work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
if (ret_work != NULL)
*ret_work = work;
if (work == NULL)
return NULL;
if (!profile_event_match(kwork, work, sample))
return NULL;
atom = list_last_entry_or_null(&work->atom_list[dst_type],
struct kwork_atom, list);
if (atom != NULL)
return atom;
src_atom = atom_new(kwork, sample);
if (src_atom != NULL)
list_add_tail(&src_atom->list, &work->atom_list[src_type]);
else {
if (ret_work != NULL)
*ret_work = NULL;
}
return NULL;
}
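/*
 * Pairing sketch (illustrative): in report mode an ENTRY atom is
 * pushed on handler entry and popped on handler exit; in latency mode
 * a RAISE atom is pushed on raise and popped on entry. The popped
 * atom carries the start timestamp used to compute the delta.
 */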
static void report_update_exit_event(struct kwork_work *work,
struct kwork_atom *atom,
struct perf_sample *sample)
{
u64 delta;
u64 exit_time = sample->time;
u64 entry_time = atom->time;
if ((entry_time != 0) && (exit_time >= entry_time)) {
delta = exit_time - entry_time;
if ((delta > work->max_runtime) ||
(work->max_runtime == 0)) {
work->max_runtime = delta;
work->max_runtime_start = entry_time;
work->max_runtime_end = exit_time;
}
work->total_runtime += delta;
work->nr_atoms++;
}
}
static int report_entry_event(struct perf_kwork *kwork,
struct kwork_class *class,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
KWORK_TRACE_MAX, evsel, sample,
machine, NULL);
}
static int report_exit_event(struct perf_kwork *kwork,
struct kwork_class *class,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct kwork_atom *atom = NULL;
struct kwork_work *work = NULL;
atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
KWORK_TRACE_ENTRY, evsel, sample,
machine, &work);
if (work == NULL)
return -1;
if (atom != NULL) {
report_update_exit_event(work, atom, sample);
atom_del(atom);
}
return 0;
}
static void latency_update_entry_event(struct kwork_work *work,
struct kwork_atom *atom,
struct perf_sample *sample)
{
u64 delta;
u64 entry_time = sample->time;
u64 raise_time = atom->time;
if ((raise_time != 0) && (entry_time >= raise_time)) {
delta = entry_time - raise_time;
if ((delta > work->max_latency) ||
(work->max_latency == 0)) {
work->max_latency = delta;
work->max_latency_start = raise_time;
work->max_latency_end = entry_time;
}
work->total_latency += delta;
work->nr_atoms++;
}
}
static int latency_raise_event(struct perf_kwork *kwork,
struct kwork_class *class,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
KWORK_TRACE_MAX, evsel, sample,
machine, NULL);
}
static int latency_entry_event(struct perf_kwork *kwork,
struct kwork_class *class,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct kwork_atom *atom = NULL;
struct kwork_work *work = NULL;
atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
KWORK_TRACE_RAISE, evsel, sample,
machine, &work);
if (work == NULL)
return -1;
if (atom != NULL) {
latency_update_entry_event(work, atom, sample);
atom_del(atom);
}
return 0;
}
static void timehist_save_callchain(struct perf_kwork *kwork,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct symbol *sym;
struct thread *thread;
struct callchain_cursor_node *node;
struct callchain_cursor *cursor;
if (!kwork->show_callchain || sample->callchain == NULL)
return;
/* want main thread for process - has maps */
thread = machine__findnew_thread(machine, sample->pid, sample->pid);
if (thread == NULL) {
pr_debug("Failed to get thread for pid %d\n", sample->pid);
return;
}
cursor = get_tls_callchain_cursor();
if (thread__resolve_callchain(thread, cursor, evsel, sample,
NULL, NULL, kwork->max_stack + 2) != 0) {
pr_debug("Failed to resolve callchain, skipping\n");
goto out_put;
}
callchain_cursor_commit(cursor);
while (true) {
node = callchain_cursor_current(cursor);
if (node == NULL)
break;
sym = node->ms.sym;
if (sym) {
if (!strcmp(sym->name, "__softirqentry_text_start") ||
!strcmp(sym->name, "__do_softirq"))
sym->ignore = 1;
}
callchain_cursor_advance(cursor);
}
out_put:
thread__put(thread);
}
static void timehist_print_event(struct perf_kwork *kwork,
struct kwork_work *work,
struct kwork_atom *atom,
struct perf_sample *sample,
struct addr_location *al)
{
char entrytime[32], exittime[32];
char kwork_name[PRINT_KWORK_NAME_WIDTH];
/*
* runtime start
*/
timestamp__scnprintf_usec(atom->time,
entrytime, sizeof(entrytime));
printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
/*
* runtime end
*/
timestamp__scnprintf_usec(sample->time,
exittime, sizeof(exittime));
printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
/*
* cpu
*/
printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
/*
* kwork name
*/
if (work->class && work->class->work_name) {
work->class->work_name(work, kwork_name,
PRINT_KWORK_NAME_WIDTH);
printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
} else
printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
/*
	 * runtime
*/
printf(" %*.*f ",
PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
(double)(sample->time - atom->time) / NSEC_PER_MSEC);
/*
* delaytime
*/
if (atom->prev != NULL)
printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
(double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
else
printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
/*
* callchain
*/
if (kwork->show_callchain) {
struct callchain_cursor *cursor = get_tls_callchain_cursor();
if (cursor == NULL)
return;
printf(" ");
sample__fprintf_sym(sample, al, 0,
EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
EVSEL__PRINT_CALLCHAIN_ARROW |
EVSEL__PRINT_SKIP_IGNORED,
cursor, symbol_conf.bt_stop_list,
stdout);
}
printf("\n");
}
static int timehist_raise_event(struct perf_kwork *kwork,
struct kwork_class *class,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
KWORK_TRACE_MAX, evsel, sample,
machine, NULL);
}
static int timehist_entry_event(struct perf_kwork *kwork,
struct kwork_class *class,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
int ret;
struct kwork_work *work = NULL;
ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
KWORK_TRACE_RAISE, evsel, sample,
machine, &work);
if (ret)
return ret;
if (work != NULL)
timehist_save_callchain(kwork, sample, evsel, machine);
return 0;
}
static int timehist_exit_event(struct perf_kwork *kwork,
struct kwork_class *class,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct kwork_atom *atom = NULL;
struct kwork_work *work = NULL;
struct addr_location al;
int ret = 0;
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0) {
pr_debug("Problem processing event, skipping it\n");
ret = -1;
goto out;
}
atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
KWORK_TRACE_ENTRY, evsel, sample,
machine, &work);
if (work == NULL) {
ret = -1;
goto out;
}
if (atom != NULL) {
work->nr_atoms++;
timehist_print_event(kwork, work, atom, sample, &al);
atom_del(atom);
}
out:
addr_location__exit(&al);
return ret;
}
static struct kwork_class kwork_irq;
static int process_irq_handler_entry_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
if (kwork->tp_handler->entry_event)
return kwork->tp_handler->entry_event(kwork, &kwork_irq,
evsel, sample, machine);
return 0;
}
static int process_irq_handler_exit_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
if (kwork->tp_handler->exit_event)
return kwork->tp_handler->exit_event(kwork, &kwork_irq,
evsel, sample, machine);
return 0;
}
const struct evsel_str_handler irq_tp_handlers[] = {
{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
{ "irq:irq_handler_exit", process_irq_handler_exit_event, },
};
static int irq_class_init(struct kwork_class *class,
struct perf_session *session)
{
if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
pr_err("Failed to set irq tracepoints handlers\n");
return -1;
}
class->work_root = RB_ROOT_CACHED;
return 0;
}
static void irq_work_init(struct kwork_class *class,
struct kwork_work *work,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
work->class = class;
work->cpu = sample->cpu;
work->id = evsel__intval(evsel, sample, "irq");
work->name = evsel__strval(evsel, sample, "name");
}
static void irq_work_name(struct kwork_work *work, char *buf, int len)
{
snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
}
static struct kwork_class kwork_irq = {
.name = "irq",
.type = KWORK_CLASS_IRQ,
.nr_tracepoints = 2,
.tp_handlers = irq_tp_handlers,
.class_init = irq_class_init,
.work_init = irq_work_init,
.work_name = irq_work_name,
};
static struct kwork_class kwork_softirq;
static int process_softirq_raise_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
if (kwork->tp_handler->raise_event)
return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
evsel, sample, machine);
return 0;
}
static int process_softirq_entry_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
if (kwork->tp_handler->entry_event)
return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
evsel, sample, machine);
return 0;
}
static int process_softirq_exit_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
if (kwork->tp_handler->exit_event)
return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
evsel, sample, machine);
return 0;
}
const struct evsel_str_handler softirq_tp_handlers[] = {
{ "irq:softirq_raise", process_softirq_raise_event, },
{ "irq:softirq_entry", process_softirq_entry_event, },
{ "irq:softirq_exit", process_softirq_exit_event, },
};
static int softirq_class_init(struct kwork_class *class,
struct perf_session *session)
{
if (perf_session__set_tracepoints_handlers(session,
softirq_tp_handlers)) {
pr_err("Failed to set softirq tracepoints handlers\n");
return -1;
}
class->work_root = RB_ROOT_CACHED;
return 0;
}
static char *evsel__softirq_name(struct evsel *evsel, u64 num)
{
char *name = NULL;
bool found = false;
struct tep_print_flag_sym *sym = NULL;
struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
if ((args == NULL) || (args->next == NULL))
return NULL;
/* skip softirq field: "REC->vec" */
for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
if ((eval_flag(sym->value) == (unsigned long long)num) &&
(strlen(sym->str) != 0)) {
found = true;
break;
}
}
if (!found)
return NULL;
name = strdup(sym->str);
if (name == NULL) {
pr_err("Failed to copy symbol name\n");
return NULL;
}
return name;
}
static void softirq_work_init(struct kwork_class *class,
struct kwork_work *work,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
u64 num = evsel__intval(evsel, sample, "vec");
work->id = num;
work->class = class;
work->cpu = sample->cpu;
work->name = evsel__softirq_name(evsel, num);
}
static void softirq_work_name(struct kwork_work *work, char *buf, int len)
{
snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
}
static struct kwork_class kwork_softirq = {
.name = "softirq",
.type = KWORK_CLASS_SOFTIRQ,
.nr_tracepoints = 3,
.tp_handlers = softirq_tp_handlers,
.class_init = softirq_class_init,
.work_init = softirq_work_init,
.work_name = softirq_work_name,
};
static struct kwork_class kwork_workqueue;
static int process_workqueue_activate_work_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
if (kwork->tp_handler->raise_event)
return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
evsel, sample, machine);
return 0;
}
static int process_workqueue_execute_start_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
if (kwork->tp_handler->entry_event)
return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
evsel, sample, machine);
return 0;
}
static int process_workqueue_execute_end_event(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
if (kwork->tp_handler->exit_event)
return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
evsel, sample, machine);
return 0;
}
const struct evsel_str_handler workqueue_tp_handlers[] = {
{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
{ "workqueue:workqueue_execute_end", process_workqueue_execute_end_event, },
};
static int workqueue_class_init(struct kwork_class *class,
struct perf_session *session)
{
if (perf_session__set_tracepoints_handlers(session,
workqueue_tp_handlers)) {
pr_err("Failed to set workqueue tracepoints handlers\n");
return -1;
}
class->work_root = RB_ROOT_CACHED;
return 0;
}
static void workqueue_work_init(struct kwork_class *class,
struct kwork_work *work,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
{
char *modp = NULL;
unsigned long long function_addr = evsel__intval(evsel,
sample, "function");
work->class = class;
work->cpu = sample->cpu;
work->id = evsel__intval(evsel, sample, "work");
work->name = function_addr == 0 ? NULL :
machine__resolve_kernel_addr(machine, &function_addr, &modp);
}
static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
{
if (work->name != NULL)
snprintf(buf, len, "(w)%s", work->name);
else
snprintf(buf, len, "(w)0x%" PRIx64, work->id);
}
static struct kwork_class kwork_workqueue = {
.name = "workqueue",
.type = KWORK_CLASS_WORKQUEUE,
.nr_tracepoints = 3,
.tp_handlers = workqueue_tp_handlers,
.class_init = workqueue_class_init,
.work_init = workqueue_work_init,
.work_name = workqueue_work_name,
};
static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
[KWORK_CLASS_IRQ] = &kwork_irq,
[KWORK_CLASS_SOFTIRQ] = &kwork_softirq,
[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
};
static void print_separator(int len)
{
printf(" %.*s\n", len, graph_dotted_line);
}
static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
{
int ret = 0;
char kwork_name[PRINT_KWORK_NAME_WIDTH];
char max_runtime_start[32], max_runtime_end[32];
char max_latency_start[32], max_latency_end[32];
printf(" ");
/*
* kwork name
*/
if (work->class && work->class->work_name) {
work->class->work_name(work, kwork_name,
PRINT_KWORK_NAME_WIDTH);
ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
} else {
ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
}
/*
* cpu
*/
ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
/*
* total runtime
*/
if (kwork->report == KWORK_REPORT_RUNTIME) {
ret += printf(" %*.*f ms |",
PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
(double)work->total_runtime / NSEC_PER_MSEC);
} else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
ret += printf(" %*.*f ms |",
PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
(double)work->total_latency /
work->nr_atoms / NSEC_PER_MSEC);
}
/*
* count
*/
ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
/*
* max runtime, max runtime start, max runtime end
*/
if (kwork->report == KWORK_REPORT_RUNTIME) {
timestamp__scnprintf_usec(work->max_runtime_start,
max_runtime_start,
sizeof(max_runtime_start));
timestamp__scnprintf_usec(work->max_runtime_end,
max_runtime_end,
sizeof(max_runtime_end));
ret += printf(" %*.*f ms | %*s s | %*s s |",
PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
(double)work->max_runtime / NSEC_PER_MSEC,
PRINT_TIMESTAMP_WIDTH, max_runtime_start,
PRINT_TIMESTAMP_WIDTH, max_runtime_end);
}
/*
* max delay, max delay start, max delay end
*/
else if (kwork->report == KWORK_REPORT_LATENCY) {
timestamp__scnprintf_usec(work->max_latency_start,
max_latency_start,
sizeof(max_latency_start));
timestamp__scnprintf_usec(work->max_latency_end,
max_latency_end,
sizeof(max_latency_end));
ret += printf(" %*.*f ms | %*s s | %*s s |",
PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
(double)work->max_latency / NSEC_PER_MSEC,
PRINT_TIMESTAMP_WIDTH, max_latency_start,
PRINT_TIMESTAMP_WIDTH, max_latency_end);
}
printf("\n");
return ret;
}
static int report_print_header(struct perf_kwork *kwork)
{
int ret;
printf("\n ");
ret = printf(" %-*s | %-*s |",
PRINT_KWORK_NAME_WIDTH, "Kwork Name",
PRINT_CPU_WIDTH, "Cpu");
if (kwork->report == KWORK_REPORT_RUNTIME) {
ret += printf(" %-*s |",
PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
} else if (kwork->report == KWORK_REPORT_LATENCY) {
ret += printf(" %-*s |",
PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
}
ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
if (kwork->report == KWORK_REPORT_RUNTIME) {
ret += printf(" %-*s | %-*s | %-*s |",
PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
} else if (kwork->report == KWORK_REPORT_LATENCY) {
ret += printf(" %-*s | %-*s | %-*s |",
PRINT_LATENCY_HEADER_WIDTH, "Max delay",
PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
}
printf("\n");
print_separator(ret);
return ret;
}
static void timehist_print_header(void)
{
/*
* header row
*/
printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n",
PRINT_TIMESTAMP_WIDTH, "Runtime start",
PRINT_TIMESTAMP_WIDTH, "Runtime end",
PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
PRINT_KWORK_NAME_WIDTH, "Kwork name",
PRINT_RUNTIME_WIDTH, "Runtime",
PRINT_RUNTIME_WIDTH, "Delaytime");
/*
* units row
*/
printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n",
PRINT_TIMESTAMP_WIDTH, "",
PRINT_TIMESTAMP_WIDTH, "",
PRINT_TIMEHIST_CPU_WIDTH, "",
PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
PRINT_RUNTIME_WIDTH, "(msec)",
PRINT_RUNTIME_WIDTH, "(msec)");
/*
* separator
*/
printf(" %.*s %.*s %.*s %.*s %.*s %.*s\n",
PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
PRINT_RUNTIME_WIDTH, graph_dotted_line,
PRINT_RUNTIME_WIDTH, graph_dotted_line);
}
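/*
 * Illustrative output of the header above (values made up, spacing
 * approximate):
 *
 *	Runtime start     Runtime end       Cpu     Kwork name      Runtime  Delaytime
 *	                                            (TYPE)NAME:NUM  (msec)   (msec)
 *	----------------  ----------------  ------  --------------  -------  ---------
 *	91577.663215      91577.663240      [0005]  (s)RCU:9          0.025      0.114
 */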
static void print_summary(struct perf_kwork *kwork)
{
u64 time = kwork->timeend - kwork->timestart;
printf(" Total count : %9" PRIu64 "\n", kwork->all_count);
printf(" Total runtime (msec) : %9.3f (%.3f%% load average)\n",
(double)kwork->all_runtime / NSEC_PER_MSEC,
time == 0 ? 0 : (double)kwork->all_runtime / time);
printf(" Total time span (msec) : %9.3f\n",
(double)time / NSEC_PER_MSEC);
}
static unsigned long long nr_list_entry(struct list_head *head)
{
struct list_head *pos;
unsigned long long n = 0;
list_for_each(pos, head)
n++;
return n;
}
static void print_skipped_events(struct perf_kwork *kwork)
{
int i;
const char *const kwork_event_str[] = {
[KWORK_TRACE_RAISE] = "raise",
[KWORK_TRACE_ENTRY] = "entry",
[KWORK_TRACE_EXIT] = "exit",
};
if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
(kwork->nr_events != 0)) {
printf(" INFO: %.3f%% skipped events (%" PRIu64 " including ",
(double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
(double)kwork->nr_events * 100.0,
kwork->nr_skipped_events[KWORK_TRACE_MAX]);
for (i = 0; i < KWORK_TRACE_MAX; i++) {
printf("%" PRIu64 " %s%s",
kwork->nr_skipped_events[i],
kwork_event_str[i],
(i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
}
}
if (verbose > 0)
printf(" INFO: use %lld atom pages\n",
nr_list_entry(&kwork->atom_page_list));
}
static void print_bad_events(struct perf_kwork *kwork)
{
if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
(double)kwork->nr_lost_events /
(double)kwork->nr_events * 100.0,
kwork->nr_lost_events, kwork->nr_events,
kwork->nr_lost_chunks);
}
}
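/*
 * Drain the per-class rbtree and re-insert every work item into the
 * global sorted_work_root, ordered by the configured sort keys.
 */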
static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
{
struct rb_node *node;
struct kwork_work *data;
struct rb_root_cached *root = &class->work_root;
pr_debug("Sorting %s ...\n", class->name);
for (;;) {
node = rb_first_cached(root);
if (!node)
break;
rb_erase_cached(node, root);
data = rb_entry(node, struct kwork_work, node);
work_insert(&kwork->sorted_work_root,
data, &kwork->sort_list);
}
}
static void perf_kwork__sort(struct perf_kwork *kwork)
{
struct kwork_class *class;
list_for_each_entry(class, &kwork->class_list, list)
work_sort(kwork, class);
}
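/*
 * Pick the tracepoint handler set matching the report type, let each
 * class initialize itself for this session, and validate the --cpu
 * and --time filters.
 */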
static int perf_kwork__check_config(struct perf_kwork *kwork,
struct perf_session *session)
{
int ret;
struct evsel *evsel;
struct kwork_class *class;
static struct trace_kwork_handler report_ops = {
.entry_event = report_entry_event,
.exit_event = report_exit_event,
};
static struct trace_kwork_handler latency_ops = {
.raise_event = latency_raise_event,
.entry_event = latency_entry_event,
};
static struct trace_kwork_handler timehist_ops = {
.raise_event = timehist_raise_event,
.entry_event = timehist_entry_event,
.exit_event = timehist_exit_event,
};
switch (kwork->report) {
case KWORK_REPORT_RUNTIME:
kwork->tp_handler = &report_ops;
break;
case KWORK_REPORT_LATENCY:
kwork->tp_handler = &latency_ops;
break;
case KWORK_REPORT_TIMEHIST:
kwork->tp_handler = &timehist_ops;
break;
default:
pr_debug("Invalid report type %d\n", kwork->report);
return -1;
}
list_for_each_entry(class, &kwork->class_list, list)
if ((class->class_init != NULL) &&
(class->class_init(class, session) != 0))
return -1;
if (kwork->cpu_list != NULL) {
ret = perf_session__cpu_bitmap(session,
kwork->cpu_list,
kwork->cpu_bitmap);
if (ret < 0) {
pr_err("Invalid cpu bitmap\n");
return -1;
}
}
if (kwork->time_str != NULL) {
ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
if (ret != 0) {
pr_err("Invalid time span\n");
return -1;
}
}
list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
pr_debug("Samples do not have callchains\n");
kwork->show_callchain = 0;
symbol_conf.use_callchain = 0;
}
}
return 0;
}
static int perf_kwork__read_events(struct perf_kwork *kwork)
{
int ret = -1;
struct perf_session *session = NULL;
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = kwork->force,
};
session = perf_session__new(&data, &kwork->tool);
if (IS_ERR(session)) {
pr_debug("Error creating perf session\n");
return PTR_ERR(session);
}
symbol__init(&session->header.env);
if (perf_kwork__check_config(kwork, session) != 0)
goto out_delete;
if (session->tevent.pevent &&
tep_set_function_resolver(session->tevent.pevent,
machine__resolve_kernel_addr,
&session->machines.host) < 0) {
pr_err("Failed to set libtraceevent function resolver\n");
goto out_delete;
}
if (kwork->report == KWORK_REPORT_TIMEHIST)
timehist_print_header();
ret = perf_session__process_events(session);
if (ret) {
pr_debug("Failed to process events, error %d\n", ret);
goto out_delete;
}
kwork->nr_events = session->evlist->stats.nr_events[0];
kwork->nr_lost_events = session->evlist->stats.total_lost;
kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
out_delete:
perf_session__delete(session);
return ret;
}
static void process_skipped_events(struct perf_kwork *kwork,
struct kwork_work *work)
{
int i;
unsigned long long count;
for (i = 0; i < KWORK_TRACE_MAX; i++) {
count = nr_list_entry(&work->atom_list[i]);
kwork->nr_skipped_events[i] += count;
kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
}
}
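/*
 * Allocate a new work item from the given key template and insert it
 * into the per-class rbtree using the kwork->cmp_id compare keys.
 */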
struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
struct kwork_class *class,
struct kwork_work *key)
{
struct kwork_work *work = NULL;
work = work_new(key);
if (work == NULL)
return NULL;
work_insert(&class->work_root, work, &kwork->cmp_id);
return work;
}
static void sig_handler(int sig)
{
/*
	 * Simply capture the termination signal so that
	 * the program can continue after pause() returns.
	 */
	pr_debug("Captured signal %d\n", sig);
}
static int perf_kwork__report_bpf(struct perf_kwork *kwork)
{
int ret;
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
ret = perf_kwork__trace_prepare_bpf(kwork);
if (ret)
return -1;
printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
perf_kwork__trace_start();
/*
* a simple pause, wait here for stop signal
*/
pause();
perf_kwork__trace_finish();
perf_kwork__report_read_bpf(kwork);
perf_kwork__report_cleanup_bpf();
return 0;
}
static int perf_kwork__report(struct perf_kwork *kwork)
{
int ret;
struct rb_node *next;
struct kwork_work *work;
if (kwork->use_bpf)
ret = perf_kwork__report_bpf(kwork);
else
ret = perf_kwork__read_events(kwork);
if (ret != 0)
return -1;
perf_kwork__sort(kwork);
setup_pager();
ret = report_print_header(kwork);
next = rb_first_cached(&kwork->sorted_work_root);
while (next) {
work = rb_entry(next, struct kwork_work, node);
process_skipped_events(kwork, work);
if (work->nr_atoms != 0) {
report_print_work(kwork, work);
if (kwork->summary) {
kwork->all_runtime += work->total_runtime;
kwork->all_count += work->nr_atoms;
}
}
next = rb_next(next);
}
print_separator(ret);
if (kwork->summary) {
print_summary(kwork);
print_separator(ret);
}
print_bad_events(kwork);
print_skipped_events(kwork);
printf("\n");
return 0;
}
typedef int (*tracepoint_handler)(struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine);
static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
int err = 0;
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
err = f(tool, evsel, sample, machine);
}
return err;
}
static int perf_kwork__timehist(struct perf_kwork *kwork)
{
/*
* event handlers for timehist option
*/
kwork->tool.comm = perf_event__process_comm;
kwork->tool.exit = perf_event__process_exit;
kwork->tool.fork = perf_event__process_fork;
kwork->tool.attr = perf_event__process_attr;
kwork->tool.tracing_data = perf_event__process_tracing_data;
kwork->tool.build_id = perf_event__process_build_id;
kwork->tool.ordered_events = true;
kwork->tool.ordering_requires_timestamps = true;
symbol_conf.use_callchain = kwork->show_callchain;
if (symbol__validate_sym_arguments()) {
pr_err("Failed to validate sym arguments\n");
return -1;
}
setup_pager();
return perf_kwork__read_events(kwork);
}
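/*
 * Parse the comma-separated --kwork class list into kwork->class_list;
 * if nothing was requested, enable every supported class. Illustrative
 * invocation: "perf kwork -k irq,workqueue report".
 */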
static void setup_event_list(struct perf_kwork *kwork,
const struct option *options,
const char * const usage_msg[])
{
int i;
struct kwork_class *class;
char *tmp, *tok, *str;
if (kwork->event_list_str == NULL)
goto null_event_list_str;
str = strdup(kwork->event_list_str);
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
for (i = 0; i < KWORK_CLASS_MAX; i++) {
class = kwork_class_supported_list[i];
if (strcmp(tok, class->name) == 0) {
list_add_tail(&class->list, &kwork->class_list);
break;
}
}
if (i == KWORK_CLASS_MAX) {
usage_with_options_msg(usage_msg, options,
"Unknown --event key: `%s'", tok);
}
}
free(str);
null_event_list_str:
/*
	 * configure all kwork events if none were specified
*/
if (list_empty(&kwork->class_list)) {
for (i = 0; i < KWORK_CLASS_MAX; i++) {
list_add_tail(&kwork_class_supported_list[i]->list,
&kwork->class_list);
}
}
pr_debug("Config event list:");
list_for_each_entry(class, &kwork->class_list, list)
pr_debug(" %s", class->name);
pr_debug("\n");
}
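/*
 * Build a "perf record" command line from the fixed defaults plus a
 * "-e <tracepoint>" pair for every tracepoint of every selected class,
 * then append the user's remaining arguments and run it.
 */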
static int perf_kwork__record(struct perf_kwork *kwork,
int argc, const char **argv)
{
const char **rec_argv;
unsigned int rec_argc, i, j;
struct kwork_class *class;
const char *const record_args[] = {
"record",
"-a",
"-R",
"-m", "1024",
"-c", "1",
};
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
list_for_each_entry(class, &kwork->class_list, list)
rec_argc += 2 * class->nr_tracepoints;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
list_for_each_entry(class, &kwork->class_list, list) {
for (j = 0; j < class->nr_tracepoints; j++) {
rec_argv[i++] = strdup("-e");
rec_argv[i++] = strdup(class->tp_handlers[j].name);
}
}
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
BUG_ON(i != rec_argc);
pr_debug("record comm: ");
for (j = 0; j < rec_argc; j++)
pr_debug("%s ", rec_argv[j]);
pr_debug("\n");
return cmd_record(i, rec_argv);
}
int cmd_kwork(int argc, const char **argv)
{
static struct perf_kwork kwork = {
.class_list = LIST_HEAD_INIT(kwork.class_list),
.tool = {
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.sample = perf_kwork__process_tracepoint_sample,
},
.atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
.sort_list = LIST_HEAD_INIT(kwork.sort_list),
.cmp_id = LIST_HEAD_INIT(kwork.cmp_id),
.sorted_work_root = RB_ROOT_CACHED,
.tp_handler = NULL,
.profile_name = NULL,
.cpu_list = NULL,
.time_str = NULL,
.force = false,
.event_list_str = NULL,
.summary = false,
.sort_order = NULL,
.show_callchain = false,
.max_stack = 5,
.timestart = 0,
.timeend = 0,
.nr_events = 0,
.nr_lost_chunks = 0,
.nr_lost_events = 0,
.all_runtime = 0,
.all_count = 0,
.nr_skipped_events = { 0 },
};
static const char default_report_sort_order[] = "runtime, max, count";
static const char default_latency_sort_order[] = "avg, max, count";
const struct option kwork_options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
"list of kwork to profile (irq, softirq, workqueue, etc)"),
OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
OPT_END()
};
const struct option report_options[] = {
OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
"sort by key(s): runtime, max, count"),
OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
"list of cpus to profile"),
OPT_STRING('n', "name", &kwork.profile_name, "name",
"event name to profile"),
OPT_STRING(0, "time", &kwork.time_str, "str",
"Time span for analysis (start,stop)"),
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_BOOLEAN('S', "with-summary", &kwork.summary,
"Show summary with statistics"),
#ifdef HAVE_BPF_SKEL
OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
"Use BPF to measure kwork runtime"),
#endif
OPT_PARENT(kwork_options)
};
const struct option latency_options[] = {
OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
"sort by key(s): avg, max, count"),
OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
"list of cpus to profile"),
OPT_STRING('n', "name", &kwork.profile_name, "name",
"event name to profile"),
OPT_STRING(0, "time", &kwork.time_str, "str",
"Time span for analysis (start,stop)"),
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
#ifdef HAVE_BPF_SKEL
OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
"Use BPF to measure kwork latency"),
#endif
OPT_PARENT(kwork_options)
};
const struct option timehist_options[] = {
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
"file", "kallsyms pathname"),
OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
"Display call chains if present"),
OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
"Maximum number of functions to display backtrace."),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
OPT_STRING(0, "time", &kwork.time_str, "str",
"Time span for analysis (start,stop)"),
OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
"list of cpus to profile"),
OPT_STRING('n', "name", &kwork.profile_name, "name",
"event name to profile"),
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_PARENT(kwork_options)
};
const char *kwork_usage[] = {
NULL,
NULL
};
const char * const report_usage[] = {
"perf kwork report [<options>]",
NULL
};
const char * const latency_usage[] = {
"perf kwork latency [<options>]",
NULL
};
const char * const timehist_usage[] = {
"perf kwork timehist [<options>]",
NULL
};
const char *const kwork_subcommands[] = {
"record", "report", "latency", "timehist", NULL
};
argc = parse_options_subcommand(argc, argv, kwork_options,
kwork_subcommands, kwork_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(kwork_usage, kwork_options);
setup_event_list(&kwork, kwork_options, kwork_usage);
sort_dimension__add(&kwork, "id", &kwork.cmp_id);
if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
return perf_kwork__record(&kwork, argc, argv);
else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
kwork.sort_order = default_report_sort_order;
if (argc > 1) {
argc = parse_options(argc, argv, report_options, report_usage, 0);
if (argc)
usage_with_options(report_usage, report_options);
}
kwork.report = KWORK_REPORT_RUNTIME;
setup_sorting(&kwork, report_options, report_usage);
return perf_kwork__report(&kwork);
} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
kwork.sort_order = default_latency_sort_order;
if (argc > 1) {
argc = parse_options(argc, argv, latency_options, latency_usage, 0);
if (argc)
usage_with_options(latency_usage, latency_options);
}
kwork.report = KWORK_REPORT_LATENCY;
setup_sorting(&kwork, latency_options, latency_usage);
return perf_kwork__report(&kwork);
} else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
if (argc > 1) {
argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
if (argc)
usage_with_options(timehist_usage, timehist_options);
}
kwork.report = KWORK_REPORT_TIMEHIST;
return perf_kwork__timehist(&kwork);
} else
usage_with_options(kwork_usage, kwork_options);
return 0;
}
| linux-master | tools/perf/builtin-kwork.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-config.c
*
* Copyright (C) 2015, Taeung Song <[email protected]>
*
*/
#include "builtin.h"
#include "util/cache.h"
#include <subcmd/parse-options.h>
#include "util/debug.h"
#include "util/config.h"
#include <linux/string.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
static bool use_system_config, use_user_config;
static const char * const config_usage[] = {
"perf config [<file-option>] [options] [section.name[=value] ...]",
NULL
};
enum actions {
ACTION_LIST = 1
} actions;
static struct option config_options[] = {
OPT_SET_UINT('l', "list", &actions,
"show current config variables", ACTION_LIST),
OPT_BOOLEAN(0, "system", &use_system_config, "use system config file"),
OPT_BOOLEAN(0, "user", &use_user_config, "use user config file"),
OPT_END()
};
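/*
 * Rewrite the config file from the in-memory set, skipping entries
 * that originate from the system config unless --system was given.
 */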
static int set_config(struct perf_config_set *set, const char *file_name)
{
struct perf_config_section *section = NULL;
struct perf_config_item *item = NULL;
const char *first_line = "# this file is auto-generated.";
FILE *fp;
if (set == NULL)
return -1;
fp = fopen(file_name, "w");
if (!fp)
return -1;
fprintf(fp, "%s\n", first_line);
	/* overwrite config variables */
perf_config_items__for_each_entry(&set->sections, section) {
if (!use_system_config && section->from_system_config)
continue;
fprintf(fp, "[%s]\n", section->name);
perf_config_items__for_each_entry(§ion->items, item) {
if (!use_system_config && item->from_system_config)
continue;
if (item->value)
fprintf(fp, "\t%s = %s\n",
item->name, item->value);
}
}
fclose(fp);
return 0;
}
static int show_spec_config(struct perf_config_set *set, const char *var)
{
struct perf_config_section *section;
struct perf_config_item *item;
if (set == NULL)
return -1;
perf_config_items__for_each_entry(&set->sections, section) {
if (!strstarts(var, section->name))
continue;
perf_config_items__for_each_entry(§ion->items, item) {
const char *name = var + strlen(section->name) + 1;
if (strcmp(name, item->name) == 0) {
char *value = item->value;
if (value) {
printf("%s=%s\n", var, value);
return 0;
}
}
}
}
return 0;
}
static int show_config(struct perf_config_set *set)
{
struct perf_config_section *section;
struct perf_config_item *item;
if (set == NULL)
return -1;
perf_config_set__for_each_entry(set, section, item) {
char *value = item->value;
if (value)
printf("%s.%s=%s\n", section->name,
item->name, value);
}
return 0;
}
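/*
 * Split a "section.name[=value]" argument. Illustrative example:
 * "call-graph.record-mode=dwarf" yields var = "call-graph.record-mode"
 * and value = "dwarf"; without '=', value is left NULL and the caller
 * shows the current setting instead.
 */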
static int parse_config_arg(char *arg, char **var, char **value)
{
const char *last_dot = strchr(arg, '.');
/*
* Since "var" actually contains the section name and the real
* config variable name separated by a dot, we have to know where the dot is.
*/
if (last_dot == NULL || last_dot == arg) {
pr_err("The config variable does not contain a section name: %s\n", arg);
return -1;
}
if (!last_dot[1]) {
pr_err("The config variable does not contain a variable name: %s\n", arg);
return -1;
}
*value = strchr(arg, '=');
if (*value == NULL)
*var = arg;
else if (!strcmp(*value, "=")) {
pr_err("The config variable does not contain a value: %s\n", arg);
return -1;
} else {
*value = *value + 1; /* excluding a first character '=' */
*var = strsep(&arg, "=");
if (*var[0] == '\0') {
pr_err("invalid config variable: %s\n", arg);
return -1;
}
}
return 0;
}
int cmd_config(int argc, const char **argv)
{
int i, ret = -1;
struct perf_config_set *set;
char path[PATH_MAX];
char *user_config = mkpath(path, sizeof(path), "%s/.perfconfig", getenv("HOME"));
const char *config_filename;
bool changed = false;
argc = parse_options(argc, argv, config_options, config_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (use_system_config && use_user_config) {
pr_err("Error: only one config file at a time\n");
parse_options_usage(config_usage, config_options, "user", 0);
parse_options_usage(NULL, config_options, "system", 0);
return -1;
}
if (use_system_config)
config_exclusive_filename = perf_etc_perfconfig();
else if (use_user_config)
config_exclusive_filename = user_config;
if (!config_exclusive_filename)
config_filename = user_config;
else
config_filename = config_exclusive_filename;
/*
	 * Only the 'config' sub-command uses its own config set here,
	 * because the config file location may have been changed by the
	 * options above and the set must be reinitialized from it.
*/
set = perf_config_set__new();
if (!set)
goto out_err;
switch (actions) {
case ACTION_LIST:
if (argc) {
pr_err("Error: takes no arguments\n");
parse_options_usage(config_usage, config_options, "l", 1);
} else {
do_action_list:
if (show_config(set) < 0) {
pr_err("Nothing configured, "
"please check your %s \n", config_filename);
goto out_err;
}
}
break;
default:
if (!argc)
goto do_action_list;
for (i = 0; argv[i]; i++) {
char *var, *value;
char *arg = strdup(argv[i]);
if (!arg) {
pr_err("%s: strdup failed\n", __func__);
goto out_err;
}
if (parse_config_arg(arg, &var, &value) < 0) {
free(arg);
goto out_err;
}
if (value == NULL) {
if (show_spec_config(set, var) < 0) {
pr_err("%s is not configured: %s\n",
var, config_filename);
free(arg);
goto out_err;
}
} else {
if (perf_config_set__collect(set, config_filename,
var, value) < 0) {
pr_err("Failed to add '%s=%s'\n",
var, value);
free(arg);
goto out_err;
}
changed = true;
}
free(arg);
}
if (!changed)
break;
if (set_config(set, config_filename) < 0) {
pr_err("Failed to set the configs on %s\n",
config_filename);
goto out_err;
}
}
ret = 0;
out_err:
perf_config_set__delete(set);
return ret;
}
| linux-master | tools/perf/builtin-config.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-record.c
*
* Builtin record command: Record the profile of a workload
* (or a CPU, or a PID) into the perf.data output file - for
* later analysis via perf report.
*/
#include "builtin.h"
#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include <internal/xyarray.h>
#include "util/parse-events.h"
#include "util/config.h"
#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/mmap.h"
#include "util/mutex.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/perf_api_probe.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "util/util.h"
#include "util/pfm.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include "util/clockid.h"
#include "util/off_cpu.h"
#include "util/bpf-filter.h"
#include "asm/bug.h"
#include "perf.h"
#include "cputopo.h"
#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <unistd.h>
#ifndef HAVE_GETTID
#include <syscall.h>
#endif
#include <sched.h>
#include <signal.h>
#ifdef HAVE_EVENTFD_SUPPORT
#include <sys/eventfd.h>
#endif
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <linux/bitmap.h>
#include <sys/time.h>
struct switch_output {
bool enabled;
bool signal;
unsigned long size;
unsigned long time;
const char *str;
bool set;
char **filenames;
int num_files;
int cur_file;
};
struct thread_mask {
struct mmap_cpu_mask maps;
struct mmap_cpu_mask affinity;
};
struct record_thread {
pid_t tid;
struct thread_mask *mask;
struct {
int msg[2];
int ack[2];
} pipes;
struct fdarray pollfd;
int ctlfd_pos;
int nr_mmaps;
struct mmap **maps;
struct mmap **overwrite_maps;
struct record *rec;
unsigned long long samples;
unsigned long waking;
u64 bytes_written;
u64 bytes_transferred;
u64 bytes_compressed;
};
static __thread struct record_thread *thread;
enum thread_msg {
THREAD_MSG__UNDEFINED = 0,
THREAD_MSG__READY,
THREAD_MSG__MAX,
};
static const char *thread_msg_tags[THREAD_MSG__MAX] = {
"UNDEFINED", "READY"
};
enum thread_spec {
THREAD_SPEC__UNDEFINED = 0,
THREAD_SPEC__CPU,
THREAD_SPEC__CORE,
THREAD_SPEC__PACKAGE,
THREAD_SPEC__NUMA,
THREAD_SPEC__USER,
THREAD_SPEC__MAX,
};
static const char *thread_spec_tags[THREAD_SPEC__MAX] = {
"undefined", "cpu", "core", "package", "numa", "user"
};
struct pollfd_index_map {
int evlist_pollfd_index;
int thread_pollfd_index;
};
struct record {
struct perf_tool tool;
struct record_opts opts;
u64 bytes_written;
u64 thread_bytes_written;
struct perf_data data;
struct auxtrace_record *itr;
struct evlist *evlist;
struct perf_session *session;
struct evlist *sb_evlist;
pthread_t thread_id;
int realtime_prio;
bool switch_output_event_set;
bool no_buildid;
bool no_buildid_set;
bool no_buildid_cache;
bool no_buildid_cache_set;
bool buildid_all;
bool buildid_mmap;
bool timestamp_filename;
bool timestamp_boundary;
bool off_cpu;
struct switch_output switch_output;
unsigned long long samples;
unsigned long output_max_size; /* = 0: unlimited */
struct perf_debuginfod debuginfod;
int nr_threads;
struct thread_mask *thread_masks;
struct record_thread *thread_data;
struct pollfd_index_map *index_map;
size_t index_map_sz;
size_t index_map_cnt;
};
static volatile int done;
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
"SYS", "NODE", "CPU"
};
#ifndef HAVE_GETTID
static inline pid_t gettid(void)
{
return (pid_t)syscall(__NR_gettid);
}
#endif
static int record__threads_enabled(struct record *rec)
{
return rec->opts.threads_spec;
}
static bool switch_output_signal(struct record *rec)
{
return rec->switch_output.signal &&
trigger_is_ready(&switch_output_trigger);
}
static bool switch_output_size(struct record *rec)
{
return rec->switch_output.size &&
trigger_is_ready(&switch_output_trigger) &&
(rec->bytes_written >= rec->switch_output.size);
}
static bool switch_output_time(struct record *rec)
{
return rec->switch_output.time &&
trigger_is_ready(&switch_output_trigger);
}
static u64 record__bytes_written(struct record *rec)
{
return rec->bytes_written + rec->thread_bytes_written;
}
static bool record__output_max_size_exceeded(struct record *rec)
{
return rec->output_max_size &&
(record__bytes_written(rec) >= rec->output_max_size);
}
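/*
 * Write a chunk of trace data either to the map's own file (set in
 * parallel streaming mode) or to the main perf.data file, accounting
 * the bytes written and honoring --max-size and switch-output limits.
 */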
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
void *bf, size_t size)
{
struct perf_data_file *file = &rec->session->data->file;
if (map && map->file)
file = map->file;
if (perf_data_file__write(file, bf, size) < 0) {
pr_err("failed to write perf data, error: %m\n");
return -1;
}
if (map && map->file) {
thread->bytes_written += size;
rec->thread_bytes_written += size;
} else {
rec->bytes_written += size;
}
if (record__output_max_size_exceeded(rec) && !done) {
fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
" stopping session ]\n",
record__bytes_written(rec) >> 10);
done = 1;
}
if (switch_output_size(rec))
trigger_hit(&switch_output_trigger);
return 0;
}
static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, struct mmap *map,
void *dst, size_t dst_size, void *src, size_t src_size);
#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
void *buf, size_t size, off_t off)
{
int rc;
cblock->aio_fildes = trace_fd;
cblock->aio_buf = buf;
cblock->aio_nbytes = size;
cblock->aio_offset = off;
cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
do {
rc = aio_write(cblock);
if (rc == 0) {
break;
} else if (errno != EAGAIN) {
cblock->aio_fildes = -1;
pr_err("failed to queue perf data, error: %m\n");
break;
}
} while (1);
return rc;
}
static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
{
void *rem_buf;
off_t rem_off;
size_t rem_size;
int rc, aio_errno;
ssize_t aio_ret, written;
aio_errno = aio_error(cblock);
if (aio_errno == EINPROGRESS)
return 0;
written = aio_ret = aio_return(cblock);
if (aio_ret < 0) {
if (aio_errno != EINTR)
pr_err("failed to write perf data, error: %m\n");
written = 0;
}
rem_size = cblock->aio_nbytes - written;
if (rem_size == 0) {
cblock->aio_fildes = -1;
/*
* md->refcount is incremented in record__aio_pushfn() for
* every aio write request started in record__aio_push() so
* decrement it because the request is now complete.
*/
perf_mmap__put(&md->core);
rc = 1;
} else {
/*
		 * The aio write request may need to be restarted with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
*/
rem_off = cblock->aio_offset + written;
rem_buf = (void *)(cblock->aio_buf + written);
record__aio_write(cblock, cblock->aio_fildes,
rem_buf, rem_size, rem_off);
rc = 0;
}
return rc;
}
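/*
 * Reap in-flight aio writes: return the index of the first free
 * control block when sync_all is false, or keep suspending until
 * every outstanding request has completed when sync_all is true.
 */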
static int record__aio_sync(struct mmap *md, bool sync_all)
{
struct aiocb **aiocb = md->aio.aiocb;
struct aiocb *cblocks = md->aio.cblocks;
struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
int i, do_suspend;
do {
do_suspend = 0;
for (i = 0; i < md->aio.nr_cblocks; ++i) {
if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
if (sync_all)
aiocb[i] = NULL;
else
return i;
} else {
/*
				 * A started aio write is not complete yet,
				 * so it has to be waited for before the
				 * next allocation.
*/
aiocb[i] = &cblocks[i];
do_suspend = 1;
}
}
if (!do_suspend)
return -1;
while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
if (!(errno == EAGAIN || errno == EINTR))
pr_err("failed to sync perf data, error: %m\n");
}
} while (1);
}
struct record_aio {
struct record *rec;
void *data;
size_t size;
};
static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
{
struct record_aio *aio = to;
/*
	 * The map->core.base data pointed to by buf is copied into a free
	 * map->aio.data[] buffer to release space in the kernel buffer as
	 * fast as possible, calling perf_mmap__consume() from the
	 * perf_mmap__push() function.
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first
	 * move part of the data from map->start up to the upper bound and then
	 * the remainder from the beginning of the kernel buffer to the end of
	 * the data chunk.
*/
if (record__comp_enabled(aio->rec)) {
size = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
mmap__mmap_len(map) - aio->size,
buf, size);
} else {
memcpy(aio->data + aio->size, buf, size);
}
if (!aio->size) {
/*
* Increment map->refcount to guard map->aio.data[] buffer
* from premature deallocation because map object can be
* released earlier than aio write request started on
* map->aio.data[] buffer is complete.
*
* perf_mmap__put() is done at record__aio_complete()
* after started aio request completion or at record__aio_push()
* if the request failed to start.
*/
perf_mmap__get(&map->core);
}
aio->size += size;
return size;
}
static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
int ret, idx;
int trace_fd = rec->session->data->file.fd;
struct record_aio aio = { .rec = rec, .size = 0 };
/*
* Call record__aio_sync() to wait till map->aio.data[] buffer
* becomes available after previous aio write operation.
*/
idx = record__aio_sync(map, false);
aio.data = map->aio.data[idx];
ret = perf_mmap__push(map, &aio, record__aio_pushfn);
if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
return ret;
rec->samples++;
ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
if (!ret) {
*off += aio.size;
rec->bytes_written += aio.size;
if (switch_output_size(rec))
trigger_hit(&switch_output_trigger);
} else {
/*
* Decrement map->refcount incremented in record__aio_pushfn()
* back if record__aio_write() operation failed to start, otherwise
* map->refcount is decremented in record__aio_complete() after
* aio write operation finishes successfully.
*/
perf_mmap__put(&map->core);
}
return ret;
}
static off_t record__aio_get_pos(int trace_fd)
{
return lseek(trace_fd, 0, SEEK_CUR);
}
static void record__aio_set_pos(int trace_fd, off_t pos)
{
lseek(trace_fd, pos, SEEK_SET);
}
static void record__aio_mmap_read_sync(struct record *rec)
{
int i;
struct evlist *evlist = rec->evlist;
struct mmap *maps = evlist->mmap;
if (!record__aio_enabled(rec))
return;
for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct mmap *map = &maps[i];
if (map->core.base)
record__aio_sync(map, true);
}
}
static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;
static int record__aio_parse(const struct option *opt,
const char *str,
int unset)
{
struct record_opts *opts = (struct record_opts *)opt->value;
if (unset) {
opts->nr_cblocks = 0;
} else {
if (str)
opts->nr_cblocks = strtol(str, NULL, 0);
if (!opts->nr_cblocks)
opts->nr_cblocks = nr_cblocks_default;
}
return 0;
}
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;
static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
off_t *off __maybe_unused)
{
return -1;
}
static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
return -1;
}
static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}
static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif
static int record__aio_enabled(struct record *rec)
{
return rec->opts.nr_cblocks > 0;
}
#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
const char *str,
int unset)
{
int flush_max;
struct record_opts *opts = (struct record_opts *)opt->value;
static struct parse_tag tags[] = {
{ .tag = 'B', .mult = 1 },
{ .tag = 'K', .mult = 1 << 10 },
{ .tag = 'M', .mult = 1 << 20 },
{ .tag = 'G', .mult = 1 << 30 },
{ .tag = 0 },
};
if (unset)
return 0;
if (str) {
opts->mmap_flush = parse_tag_value(str, tags);
if (opts->mmap_flush == (int)-1)
opts->mmap_flush = strtol(str, NULL, 0);
}
if (!opts->mmap_flush)
opts->mmap_flush = MMAP_FLUSH_DEFAULT;
flush_max = evlist__mmap_size(opts->mmap_pages);
flush_max /= 4;
if (opts->mmap_flush > flush_max)
opts->mmap_flush = flush_max;
return 0;
}
#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;
static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
struct record_opts *opts = opt->value;
if (unset) {
opts->comp_level = 0;
} else {
if (str)
opts->comp_level = strtol(str, NULL, 0);
if (!opts->comp_level)
opts->comp_level = comp_level_default;
}
return 0;
}
#endif
static unsigned int comp_level_max = 22;
static int record__comp_enabled(struct record *rec)
{
return rec->opts.comp_level > 0;
}
static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct record *rec = container_of(tool, struct record, tool);
return record__write(rec, NULL, event, event->header.size);
}
static struct mutex synth_lock;
static int process_locked_synthesized_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
int ret;
mutex_lock(&synth_lock);
ret = process_synthesized_event(tool, event, sample, machine);
mutex_unlock(&synth_lock);
return ret;
}
static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
struct record *rec = to;
if (record__comp_enabled(rec)) {
size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size);
bf = map->data;
}
thread->samples++;
return record__write(rec, map, bf, size);
}
static volatile sig_atomic_t signr = -1;
static volatile sig_atomic_t child_finished;
#ifdef HAVE_EVENTFD_SUPPORT
static volatile sig_atomic_t done_fd = -1;
#endif
static void sig_handler(int sig)
{
if (sig == SIGCHLD)
child_finished = 1;
else
signr = sig;
done = 1;
#ifdef HAVE_EVENTFD_SUPPORT
if (done_fd >= 0) {
u64 tmp = 1;
int orig_errno = errno;
/*
* It is possible for this signal handler to run after done is
* checked in the main loop, but before the perf counter fds are
* polled. If this happens, the poll() will continue to wait
* even though done is set, and will only break out if either
* another signal is received, or the counters are ready for
* read. To ensure the poll() doesn't sleep when done is set,
* use an eventfd (done_fd) to wake up the poll().
*/
if (write(done_fd, &tmp, sizeof(tmp)) < 0)
pr_err("failed to signal wakeup fd, error: %m\n");
errno = orig_errno;
}
#endif // HAVE_EVENTFD_SUPPORT
}
static void sigsegv_handler(int sig)
{
perf_hooks__recover();
sighandler_dump_stack(sig);
}
static void record__sig_exit(void)
{
if (signr == -1)
return;
signal(signr, SIG_DFL);
raise(signr);
}
#ifdef HAVE_AUXTRACE_SUPPORT
static int record__process_auxtrace(struct perf_tool *tool,
struct mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2)
{
struct record *rec = container_of(tool, struct record, tool);
struct perf_data *data = &rec->data;
size_t padding;
u8 pad[8] = {0};
if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
off_t file_offset;
int fd = perf_data__fd(data);
int err;
file_offset = lseek(fd, 0, SEEK_CUR);
if (file_offset == -1)
return -1;
err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
event, file_offset);
if (err)
return err;
}
/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
padding = (len1 + len2) & 7;
if (padding)
padding = 8 - padding;
record__write(rec, map, event, event->header.size);
record__write(rec, map, data1, len1);
if (len2)
record__write(rec, map, data2, len2);
record__write(rec, map, &pad, padding);
return 0;
}
static int record__auxtrace_mmap_read(struct record *rec,
struct mmap *map)
{
int ret;
ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
record__process_auxtrace);
if (ret < 0)
return ret;
if (ret)
rec->samples++;
return 0;
}
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
struct mmap *map)
{
int ret;
ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
record__process_auxtrace,
rec->opts.auxtrace_snapshot_size);
if (ret < 0)
return ret;
if (ret)
rec->samples++;
return 0;
}
static int record__auxtrace_read_snapshot_all(struct record *rec)
{
int i;
int rc = 0;
for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
struct mmap *map = &rec->evlist->mmap[i];
if (!map->auxtrace_mmap.base)
continue;
if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
rc = -1;
goto out;
}
}
out:
return rc;
}
static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
pr_debug("Recording AUX area tracing snapshot\n");
if (record__auxtrace_read_snapshot_all(rec) < 0) {
trigger_error(&auxtrace_snapshot_trigger);
} else {
if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
trigger_error(&auxtrace_snapshot_trigger);
else
trigger_ready(&auxtrace_snapshot_trigger);
}
}
static int record__auxtrace_snapshot_exit(struct record *rec)
{
if (trigger_is_error(&auxtrace_snapshot_trigger))
return 0;
if (!auxtrace_record__snapshot_started &&
auxtrace_record__snapshot_start(rec->itr))
return -1;
record__read_auxtrace_snapshot(rec, true);
if (trigger_is_error(&auxtrace_snapshot_trigger))
return -1;
return 0;
}
static int record__auxtrace_init(struct record *rec)
{
int err;
if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts)
&& record__threads_enabled(rec)) {
pr_err("AUX area tracing options are not available in parallel streaming mode.\n");
return -EINVAL;
}
if (!rec->itr) {
rec->itr = auxtrace_record__init(rec->evlist, &err);
if (err)
return err;
}
err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
rec->opts.auxtrace_snapshot_opts);
if (err)
return err;
err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
rec->opts.auxtrace_sample_opts);
if (err)
return err;
auxtrace_regroup_aux_output(rec->evlist);
return auxtrace_parse_filters(rec->evlist);
}
#else
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
struct mmap *map __maybe_unused)
{
return 0;
}
static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
bool on_exit __maybe_unused)
{
}
static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
return 0;
}
static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
return 0;
}
static int record__auxtrace_init(struct record *rec __maybe_unused)
{
return 0;
}
#endif
static int record__config_text_poke(struct evlist *evlist)
{
struct evsel *evsel;
/* Nothing to do if text poke is already configured */
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.text_poke)
return 0;
}
evsel = evlist__add_dummy_on_all_cpus(evlist);
if (!evsel)
return -ENOMEM;
evsel->core.attr.text_poke = 1;
evsel->core.attr.ksymbol = 1;
evsel->immediate = true;
evsel__set_sample_bit(evsel, TIME);
return 0;
}
static int record__config_off_cpu(struct record *rec)
{
return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
}
static bool record__kcore_readable(struct machine *machine)
{
char kcore[PATH_MAX];
int fd;
scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);
fd = open(kcore, O_RDONLY);
if (fd < 0)
return false;
close(fd);
return true;
}
static int record__kcore_copy(struct machine *machine, struct perf_data *data)
{
char from_dir[PATH_MAX];
char kcore_dir[PATH_MAX];
int ret;
snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);
ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
if (ret)
return ret;
return kcore_copy(from_dir, kcore_dir);
}
static void record__thread_data_init_pipes(struct record_thread *thread_data)
{
thread_data->pipes.msg[0] = -1;
thread_data->pipes.msg[1] = -1;
thread_data->pipes.ack[0] = -1;
thread_data->pipes.ack[1] = -1;
}
static int record__thread_data_open_pipes(struct record_thread *thread_data)
{
if (pipe(thread_data->pipes.msg))
return -EINVAL;
if (pipe(thread_data->pipes.ack)) {
close(thread_data->pipes.msg[0]);
thread_data->pipes.msg[0] = -1;
close(thread_data->pipes.msg[1]);
thread_data->pipes.msg[1] = -1;
return -EINVAL;
}
pr_debug2("thread_data[%p]: msg=[%d,%d], ack=[%d,%d]\n", thread_data,
thread_data->pipes.msg[0], thread_data->pipes.msg[1],
thread_data->pipes.ack[0], thread_data->pipes.ack[1]);
return 0;
}
static void record__thread_data_close_pipes(struct record_thread *thread_data)
{
if (thread_data->pipes.msg[0] != -1) {
close(thread_data->pipes.msg[0]);
thread_data->pipes.msg[0] = -1;
}
if (thread_data->pipes.msg[1] != -1) {
close(thread_data->pipes.msg[1]);
thread_data->pipes.msg[1] = -1;
}
if (thread_data->pipes.ack[0] != -1) {
close(thread_data->pipes.ack[0]);
thread_data->pipes.ack[0] = -1;
}
if (thread_data->pipes.ack[1] != -1) {
close(thread_data->pipes.ack[1]);
thread_data->pipes.ack[1] = -1;
}
}
static bool evlist__per_thread(struct evlist *evlist)
{
return cpu_map__is_dummy(evlist->core.user_requested_cpus);
}
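/*
 * Distribute the evlist mmaps across this thread: in per-thread mode
 * the thread owns all of them, otherwise only the mmaps whose CPU is
 * set in the thread's maps bitmap.
 */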
static int record__thread_data_init_maps(struct record_thread *thread_data, struct evlist *evlist)
{
int m, tm, nr_mmaps = evlist->core.nr_mmaps;
struct mmap *mmap = evlist->mmap;
struct mmap *overwrite_mmap = evlist->overwrite_mmap;
struct perf_cpu_map *cpus = evlist->core.all_cpus;
bool per_thread = evlist__per_thread(evlist);
if (per_thread)
thread_data->nr_mmaps = nr_mmaps;
else
thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
thread_data->mask->maps.nbits);
if (mmap) {
thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
if (!thread_data->maps)
return -ENOMEM;
}
if (overwrite_mmap) {
thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
if (!thread_data->overwrite_maps) {
zfree(&thread_data->maps);
return -ENOMEM;
}
}
pr_debug2("thread_data[%p]: nr_mmaps=%d, maps=%p, ow_maps=%p\n", thread_data,
thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
if (per_thread ||
test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
if (thread_data->maps) {
thread_data->maps[tm] = &mmap[m];
pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
}
if (thread_data->overwrite_maps) {
thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
}
tm++;
}
}
return 0;
}
static int record__thread_data_init_pollfd(struct record_thread *thread_data, struct evlist *evlist)
{
int f, tm, pos;
struct mmap *map, *overwrite_map;
fdarray__init(&thread_data->pollfd, 64);
for (tm = 0; tm < thread_data->nr_mmaps; tm++) {
map = thread_data->maps ? thread_data->maps[tm] : NULL;
overwrite_map = thread_data->overwrite_maps ?
thread_data->overwrite_maps[tm] : NULL;
for (f = 0; f < evlist->core.pollfd.nr; f++) {
void *ptr = evlist->core.pollfd.priv[f].ptr;
if ((map && ptr == map) || (overwrite_map && ptr == overwrite_map)) {
pos = fdarray__dup_entry_from(&thread_data->pollfd, f,
&evlist->core.pollfd);
if (pos < 0)
return pos;
pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n",
thread_data, pos, evlist->core.pollfd.entries[f].fd);
}
}
}
return 0;
}
static void record__free_thread_data(struct record *rec)
{
int t;
struct record_thread *thread_data = rec->thread_data;
if (thread_data == NULL)
return;
for (t = 0; t < rec->nr_threads; t++) {
record__thread_data_close_pipes(&thread_data[t]);
zfree(&thread_data[t].maps);
zfree(&thread_data[t].overwrite_maps);
fdarray__exit(&thread_data[t].pollfd);
}
zfree(&rec->thread_data);
}
static int record__map_thread_evlist_pollfd_indexes(struct record *rec,
int evlist_pollfd_index,
int thread_pollfd_index)
{
size_t x = rec->index_map_cnt;
if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL))
return -ENOMEM;
rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index;
rec->index_map[x].thread_pollfd_index = thread_pollfd_index;
rec->index_map_cnt += 1;
return 0;
}
static int record__update_evlist_pollfd_from_thread(struct record *rec,
struct evlist *evlist,
struct record_thread *thread_data)
{
struct pollfd *e_entries = evlist->core.pollfd.entries;
struct pollfd *t_entries = thread_data->pollfd.entries;
int err = 0;
size_t i;
for (i = 0; i < rec->index_map_cnt; i++) {
int e_pos = rec->index_map[i].evlist_pollfd_index;
int t_pos = rec->index_map[i].thread_pollfd_index;
if (e_entries[e_pos].fd != t_entries[t_pos].fd ||
e_entries[e_pos].events != t_entries[t_pos].events) {
pr_err("Thread and evlist pollfd index mismatch\n");
err = -EINVAL;
continue;
}
e_entries[e_pos].revents = t_entries[t_pos].revents;
}
return err;
}
static int record__dup_non_perf_events(struct record *rec,
struct evlist *evlist,
struct record_thread *thread_data)
{
struct fdarray *fda = &evlist->core.pollfd;
int i, ret;
for (i = 0; i < fda->nr; i++) {
if (!(fda->priv[i].flags & fdarray_flag__non_perf_event))
continue;
ret = fdarray__dup_entry_from(&thread_data->pollfd, i, fda);
if (ret < 0) {
pr_err("Failed to duplicate descriptor in main thread pollfd\n");
return ret;
}
pr_debug2("thread_data[%p]: pollfd[%d] <- non_perf_event fd=%d\n",
thread_data, ret, fda->entries[i].fd);
ret = record__map_thread_evlist_pollfd_indexes(rec, i, ret);
if (ret < 0) {
pr_err("Failed to map thread and evlist pollfd indexes\n");
return ret;
}
}
return 0;
}
static int record__alloc_thread_data(struct record *rec, struct evlist *evlist)
{
int t, ret;
struct record_thread *thread_data;
rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data)));
if (!rec->thread_data) {
pr_err("Failed to allocate thread data\n");
return -ENOMEM;
}
thread_data = rec->thread_data;
for (t = 0; t < rec->nr_threads; t++)
record__thread_data_init_pipes(&thread_data[t]);
for (t = 0; t < rec->nr_threads; t++) {
thread_data[t].rec = rec;
thread_data[t].mask = &rec->thread_masks[t];
ret = record__thread_data_init_maps(&thread_data[t], evlist);
if (ret) {
pr_err("Failed to initialize thread[%d] maps\n", t);
goto out_free;
}
ret = record__thread_data_init_pollfd(&thread_data[t], evlist);
if (ret) {
pr_err("Failed to initialize thread[%d] pollfd\n", t);
goto out_free;
}
if (t) {
thread_data[t].tid = -1;
ret = record__thread_data_open_pipes(&thread_data[t]);
if (ret) {
pr_err("Failed to open thread[%d] communication pipes\n", t);
goto out_free;
}
ret = fdarray__add(&thread_data[t].pollfd, thread_data[t].pipes.msg[0],
POLLIN | POLLERR | POLLHUP, fdarray_flag__nonfilterable);
if (ret < 0) {
pr_err("Failed to add descriptor to thread[%d] pollfd\n", t);
goto out_free;
}
thread_data[t].ctlfd_pos = ret;
pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n",
thread_data, thread_data[t].ctlfd_pos,
thread_data[t].pipes.msg[0]);
} else {
thread_data[t].tid = gettid();
ret = record__dup_non_perf_events(rec, evlist, &thread_data[t]);
if (ret < 0)
goto out_free;
thread_data[t].ctlfd_pos = -1; /* Not used */
}
}
return 0;
out_free:
record__free_thread_data(rec);
return ret;
}
static int record__mmap_evlist(struct record *rec,
struct evlist *evlist)
{
int i, ret;
struct record_opts *opts = &rec->opts;
bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
opts->auxtrace_sample_mode;
char msg[512];
if (opts->affinity != PERF_AFFINITY_SYS)
cpu__setup_cpunode_map();
if (evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
auxtrace_overwrite,
opts->nr_cblocks, opts->affinity,
opts->mmap_flush, opts->comp_level) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
"/proc/sys/kernel/perf_event_mlock_kb,\n"
"or try again with a smaller value of -m/--mmap_pages.\n"
"(current value: %u,%u)\n",
opts->mmap_pages, opts->auxtrace_mmap_pages);
return -errno;
} else {
pr_err("failed to mmap with %d (%s)\n", errno,
str_error_r(errno, msg, sizeof(msg)));
if (errno)
return -errno;
else
return -EINVAL;
}
}
if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack))
return -1;
ret = record__alloc_thread_data(rec, evlist);
if (ret)
return ret;
if (record__threads_enabled(rec)) {
ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps);
if (ret) {
pr_err("Failed to create data directory: %s\n", strerror(-ret));
return ret;
}
for (i = 0; i < evlist->core.nr_mmaps; i++) {
if (evlist->mmap)
evlist->mmap[i].file = &rec->data.dir.files[i];
if (evlist->overwrite_mmap)
evlist->overwrite_mmap[i].file = &rec->data.dir.files[i];
}
}
return 0;
}
static int record__mmap(struct record *rec)
{
return record__mmap_evlist(rec, rec->evlist);
}
static int record__open(struct record *rec)
{
char msg[BUFSIZ];
struct evsel *pos;
struct evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
int rc = 0;
/*
* For initial_delay, system wide or a hybrid system, we need to add a
* dummy event so that we can track PERF_RECORD_MMAP to cover the delay
* of waiting or event synthesis.
*/
if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
perf_pmus__num_core_pmus() > 1) {
pos = evlist__get_tracking_event(evlist);
if (!evsel__is_dummy_event(pos)) {
/* Set up dummy event. */
if (evlist__add_dummy(evlist))
return -ENOMEM;
pos = evlist__last(evlist);
evlist__set_tracking_event(evlist, pos);
}
/*
* Enable the dummy event when the process is forked for
* initial_delay, immediately for system wide.
*/
if (opts->target.initial_delay && !pos->immediate &&
!target__has_cpu(&opts->target))
pos->core.attr.enable_on_exec = 1;
else
pos->immediate = 1;
}
evlist__config(evlist, opts, &callchain_param);
evlist__for_each_entry(evlist, pos) {
try_again:
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
}
if ((errno == EINVAL || errno == EBADF) &&
pos->core.leader != &pos->core &&
pos->weak_group) {
pos = evlist__reset_weak_group(evlist, pos, true);
goto try_again;
}
rc = -errno;
evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
goto out;
}
pos->supported = true;
}
if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
}
if (evlist__apply_filters(evlist, &pos)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
pos->filter ?: "BPF", evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
rc = -1;
goto out;
}
rc = record__mmap(rec);
if (rc)
goto out;
session->evlist = evlist;
perf_session__set_id_hdr_size(session);
out:
return rc;
}
static void set_timestamp_boundary(struct record *rec, u64 sample_time)
{
if (rec->evlist->first_sample_time == 0)
rec->evlist->first_sample_time = sample_time;
if (sample_time)
rec->evlist->last_sample_time = sample_time;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
{
struct record *rec = container_of(tool, struct record, tool);
set_timestamp_boundary(rec, sample->time);
if (rec->buildid_all)
return 0;
rec->samples++;
return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
static int process_buildids(struct record *rec)
{
struct perf_session *session = rec->session;
if (perf_data__size(&rec->data) == 0)
return 0;
/*
* During this process, it'll load kernel map and replace the
* dso->long_name to a real pathname it found. In this case
* we prefer the vmlinux path like
* /lib/modules/3.16.4/build/vmlinux
*
* rather than build-id path (in debug directory).
* $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
*/
symbol_conf.ignore_vmlinux_buildid = true;
/*
* If --buildid-all is given, it marks all DSO regardless of hits,
* so no need to process samples. But if timestamp_boundary is enabled,
* it still needs to walk on all samples to get the timestamps of
* first/last samples.
*/
if (rec->buildid_all && !rec->timestamp_boundary)
rec->tool.sample = NULL;
return perf_session__process_events(session);
}
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
int err;
struct perf_tool *tool = data;
/*
*As for guest kernel when processing subcommand record&report,
*we arrange module mmap prior to guest kernel mmap and trigger
*a preload dso because default guest module symbols are loaded
*from guest kallsyms instead of /lib/modules/XXX/XXX. This
*method is used to avoid symbol missing when the first addr is
*in module instead of in guest kernel.
*/
err = perf_event__synthesize_modules(tool, process_synthesized_event,
machine);
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
/*
* We use _stext for guest kernel because guest kernel's /proc/kallsyms
* have no _text sometimes.
*/
err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
machine);
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
}
static struct perf_event_header finished_round_event = {
.size = sizeof(struct perf_event_header),
.type = PERF_RECORD_FINISHED_ROUND,
};
static struct perf_event_header finished_init_event = {
.size = sizeof(struct perf_event_header),
.type = PERF_RECORD_FINISHED_INIT,
};
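/*
 * With --affinity=node or --affinity=cpu, pin the reading thread to
 * the CPUs recorded in the affinity mask of the map being flushed,
 * so the buffer is presumably drained close to where it was filled.
 */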
static void record__adjust_affinity(struct record *rec, struct mmap *map)
{
if (rec->opts.affinity != PERF_AFFINITY_SYS &&
!bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,
thread->mask->affinity.nbits)) {
bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);
bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,
map->affinity_mask.bits, thread->mask->affinity.nbits);
sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
(cpu_set_t *)thread->mask->affinity.bits);
if (verbose == 2) {
pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu());
mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity");
}
}
}
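/*
 * Callback for the zstd stream writer: the first call (increment == 0)
 * initializes a PERF_RECORD_COMPRESSED header and reserves room for
 * it; later calls grow header.size by the bytes just produced.
 */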
static size_t process_comp_header(void *record, size_t increment)
{
struct perf_record_compressed *event = record;
size_t size = sizeof(*event);
if (increment) {
event->header.size += increment;
return increment;
}
event->header.type = PERF_RECORD_COMPRESSED;
event->header.size = size;
return size;
}
static size_t zstd_compress(struct perf_session *session, struct mmap *map,
void *dst, size_t dst_size, void *src, size_t src_size)
{
size_t compressed;
size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
struct zstd_data *zstd_data = &session->zstd_data;
if (map && map->file)
zstd_data = &map->zstd_data;
compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size,
max_record_size, process_comp_header);
if (map && map->file) {
thread->bytes_transferred += src_size;
thread->bytes_compressed += compressed;
} else {
session->bytes_transferred += src_size;
session->bytes_compressed += compressed;
}
return compressed;
}
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
bool overwrite, bool synch)
{
u64 bytes_written = rec->bytes_written;
int i;
int rc = 0;
int nr_mmaps;
struct mmap **maps;
int trace_fd = rec->data.file.fd;
off_t off = 0;
if (!evlist)
return 0;
nr_mmaps = thread->nr_mmaps;
maps = overwrite ? thread->overwrite_maps : thread->maps;
if (!maps)
return 0;
if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
return 0;
if (record__aio_enabled(rec))
off = record__aio_get_pos(trace_fd);
for (i = 0; i < nr_mmaps; i++) {
u64 flush = 0;
struct mmap *map = maps[i];
if (map->core.base) {
record__adjust_affinity(rec, map);
if (synch) {
flush = map->core.flush;
map->core.flush = 1;
}
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) < 0) {
if (synch)
map->core.flush = flush;
rc = -1;
goto out;
}
} else {
if (record__aio_push(rec, map, &off) < 0) {
record__aio_set_pos(trace_fd, off);
if (synch)
map->core.flush = flush;
rc = -1;
goto out;
}
}
if (synch)
map->core.flush = flush;
}
if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
!rec->opts.auxtrace_sample_mode &&
record__auxtrace_mmap_read(rec, map) != 0) {
rc = -1;
goto out;
}
}
if (record__aio_enabled(rec))
record__aio_set_pos(trace_fd, off);
/*
* Mark the round finished in case we wrote
* at least one event.
*
	 * No need for round events in directory mode,
	 * because the per-cpu maps and files contain data
	 * already sorted by the kernel.
*/
if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written)
rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
if (overwrite)
evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
return rc;
}
static int record__mmap_read_all(struct record *rec, bool synch)
{
int err;
err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
if (err)
return err;
return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}
static void record__thread_munmap_filtered(struct fdarray *fda, int fd,
void *arg __maybe_unused)
{
struct perf_mmap *map = fda->priv[fd].ptr;
if (map)
perf_mmap__put(map);
}
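/*
 * Body of a parallel trace streaming thread: ack readiness on the pipe,
 * then loop draining this thread's mmaps, polling when no new samples
 * arrived, until the control pipe reports POLLHUP. Finish with one
 * synchronous flush and a termination ack.
 */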
static void *record__thread(void *arg)
{
enum thread_msg msg = THREAD_MSG__READY;
bool terminate = false;
struct fdarray *pollfd;
int err, ctlfd_pos;
thread = arg;
thread->tid = gettid();
err = write(thread->pipes.ack[1], &msg, sizeof(msg));
if (err == -1)
pr_warning("threads[%d]: failed to notify on start: %s\n",
thread->tid, strerror(errno));
pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
pollfd = &thread->pollfd;
ctlfd_pos = thread->ctlfd_pos;
for (;;) {
unsigned long long hits = thread->samples;
if (record__mmap_read_all(thread->rec, false) < 0 || terminate)
break;
if (hits == thread->samples) {
err = fdarray__poll(pollfd, -1);
			/*
			 * Only propagate an error if there is one; ignore a
			 * positive number of returned events and EINTR.
			 */
if (err > 0 || (err < 0 && errno == EINTR))
err = 0;
thread->waking++;
if (fdarray__filter(pollfd, POLLERR | POLLHUP,
record__thread_munmap_filtered, NULL) == 0)
break;
}
if (pollfd->entries[ctlfd_pos].revents & POLLHUP) {
terminate = true;
close(thread->pipes.msg[0]);
thread->pipes.msg[0] = -1;
pollfd->entries[ctlfd_pos].fd = -1;
pollfd->entries[ctlfd_pos].events = 0;
}
pollfd->entries[ctlfd_pos].revents = 0;
}
record__mmap_read_all(thread->rec, true);
err = write(thread->pipes.ack[1], &msg, sizeof(msg));
if (err == -1)
pr_warning("threads[%d]: failed to notify on termination: %s\n",
thread->tid, strerror(errno));
return NULL;
}
static void record__init_features(struct record *rec)
{
struct perf_session *session = rec->session;
int feat;
for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
perf_header__set_feat(&session->header, feat);
if (rec->no_buildid)
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
#ifdef HAVE_LIBTRACEEVENT
if (!have_tracepoints(&rec->evlist->core.entries))
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
#endif
if (!rec->opts.branch_stack)
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
if (!rec->opts.full_auxtrace)
perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
perf_header__clear_feat(&session->header, HEADER_CLOCKID);
if (!rec->opts.use_clockid)
perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
if (!record__threads_enabled(rec))
perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
if (!record__comp_enabled(rec))
perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
perf_header__clear_feat(&session->header, HEADER_STAT);
}
static void
record__finish_output(struct record *rec)
{
int i;
struct perf_data *data = &rec->data;
int fd = perf_data__fd(data);
if (data->is_pipe)
return;
rec->session->header.data_size += rec->bytes_written;
data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
if (record__threads_enabled(rec)) {
for (i = 0; i < data->dir.nr; i++)
data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR);
}
if (!rec->no_buildid) {
process_buildids(rec);
if (rec->buildid_all)
dsos__hit_all(rec->session);
}
perf_session__write_header(rec->session, rec->evlist, fd, true);
}
static int record__synthesize_workload(struct record *rec, bool tail)
{
int err;
struct perf_thread_map *thread_map;
bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
if (rec->opts.tail_synthesize != tail)
return 0;
thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
if (thread_map == NULL)
return -1;
err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
process_synthesized_event,
&rec->session->machines.host,
needs_mmap,
rec->opts.sample_address);
perf_thread_map__put(thread_map);
return err;
}
static int write_finished_init(struct record *rec, bool tail)
{
if (rec->opts.tail_synthesize != tail)
return 0;
return record__write(rec, NULL, &finished_init_event, sizeof(finished_init_event));
}
static int record__synthesize(struct record *rec, bool tail);
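/*
 * Finalize the current output file and switch to a new timestamped one,
 * re-synthesizing tracking events (and the workload's maps/comm when not
 * in system-wide mode) so the new file is self-contained. With
 * --switch-max-files the oldest file in the ring is removed.
 */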
static int
record__switch_output(struct record *rec, bool at_exit)
{
struct perf_data *data = &rec->data;
int fd, err;
char *new_filename;
	/* Same size as "2015122520103046" */
char timestamp[] = "InvalidTimestamp";
record__aio_mmap_read_sync(rec);
write_finished_init(rec, true);
record__synthesize(rec, true);
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, true);
rec->samples = 0;
record__finish_output(rec);
err = fetch_current_timestamp(timestamp, sizeof(timestamp));
if (err) {
pr_err("Failed to get current timestamp\n");
return -EINVAL;
}
fd = perf_data__switch(data, timestamp,
rec->session->header.data_offset,
at_exit, &new_filename);
if (fd >= 0 && !at_exit) {
rec->bytes_written = 0;
rec->session->header.data_size = 0;
}
if (!quiet)
fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
data->path, timestamp);
if (rec->switch_output.num_files) {
int n = rec->switch_output.cur_file + 1;
if (n >= rec->switch_output.num_files)
n = 0;
rec->switch_output.cur_file = n;
if (rec->switch_output.filenames[n]) {
remove(rec->switch_output.filenames[n]);
zfree(&rec->switch_output.filenames[n]);
}
rec->switch_output.filenames[n] = new_filename;
} else {
free(new_filename);
}
/* Output tracking events */
if (!at_exit) {
record__synthesize(rec, false);
		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. As a result, the newly created perf.data file
		 * doesn't contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, false);
write_finished_init(rec, false);
}
return fd;
}
static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
struct perf_record_lost_samples *lost,
int cpu_idx, int thread_idx, u64 lost_count,
u16 misc_flag)
{
struct perf_sample_id *sid;
struct perf_sample sample = {};
int id_hdr_size;
lost->lost = lost_count;
if (evsel->core.ids) {
sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx);
sample.id = sid->id;
}
id_hdr_size = perf_event__synthesize_id_sample((void *)(lost + 1),
evsel->core.attr.sample_type, &sample);
lost->header.size = sizeof(*lost) + id_hdr_size;
lost->header.misc = misc_flag;
record__write(rec, NULL, lost, lost->header.size);
}
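/*
 * Read the lost sample counts accumulated by the kernel (and by BPF
 * filters) for every event and emit matching PERF_RECORD_LOST_SAMPLES
 * records, so the loss is visible at report time.
 */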
static void record__read_lost_samples(struct record *rec)
{
struct perf_session *session = rec->session;
struct perf_record_lost_samples *lost;
struct evsel *evsel;
/* there was an error during record__open */
if (session->evlist == NULL)
return;
lost = zalloc(PERF_SAMPLE_MAX_SIZE);
if (lost == NULL) {
pr_debug("Memory allocation failed\n");
return;
}
lost->header.type = PERF_RECORD_LOST_SAMPLES;
evlist__for_each_entry(session->evlist, evsel) {
struct xyarray *xy = evsel->core.sample_id;
u64 lost_count;
if (xy == NULL || evsel->core.fd == NULL)
continue;
if (xyarray__max_x(evsel->core.fd) != xyarray__max_x(xy) ||
xyarray__max_y(evsel->core.fd) != xyarray__max_y(xy)) {
pr_debug("Unmatched FD vs. sample ID: skip reading LOST count\n");
continue;
}
for (int x = 0; x < xyarray__max_x(xy); x++) {
for (int y = 0; y < xyarray__max_y(xy); y++) {
struct perf_counts_values count;
if (perf_evsel__read(&evsel->core, x, y, &count) < 0) {
pr_debug("read LOST count failed\n");
goto out;
}
if (count.lost) {
__record__save_lost_samples(rec, evsel, lost,
x, y, count.lost, 0);
}
}
}
lost_count = perf_bpf_filter__lost_count(evsel);
if (lost_count)
__record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
PERF_RECORD_MISC_LOST_SAMPLES_BPF);
}
out:
free(lost);
}
static volatile sig_atomic_t workload_exec_errno;
/*
 * evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked for it by setting
 * its want_signal parameter to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
siginfo_t *info,
void *ucontext __maybe_unused)
{
workload_exec_errno = info->si_value.sival_int;
done = 1;
child_finished = 1;
}
static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);
static const struct perf_event_mmap_page *evlist__pick_pc(struct evlist *evlist)
{
if (evlist) {
if (evlist->mmap && evlist->mmap[0].core.base)
return evlist->mmap[0].core.base;
if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
return evlist->overwrite_mmap[0].core.base;
}
return NULL;
}
static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist);
if (pc)
return pc;
return NULL;
}
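/*
 * Emit the synthetic (non-sample) events that describe pre-existing state:
 * time conversion data, id index, auxtrace info, kernel and module maps,
 * guest machines, extra attributes, thread/cpu maps, BPF and cgroup events,
 * and finally the already running tasks, optionally using multiple threads.
 */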
static int record__synthesize(struct record *rec, bool tail)
{
struct perf_session *session = rec->session;
struct machine *machine = &session->machines.host;
struct perf_data *data = &rec->data;
struct record_opts *opts = &rec->opts;
struct perf_tool *tool = &rec->tool;
int err = 0;
event_op f = process_synthesized_event;
if (rec->opts.tail_synthesize != tail)
return 0;
if (data->is_pipe) {
err = perf_event__synthesize_for_pipe(tool, session, data,
process_synthesized_event);
if (err < 0)
goto out;
rec->bytes_written += err;
}
err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
process_synthesized_event, machine);
if (err)
goto out;
/* Synthesize id_index before auxtrace_info */
err = perf_event__synthesize_id_index(tool,
process_synthesized_event,
session->evlist, machine);
if (err)
goto out;
if (rec->opts.full_auxtrace) {
err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
session, process_synthesized_event);
if (err)
goto out;
}
if (!evlist__exclude_kernel(rec->evlist)) {
err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
machine);
WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
"Check /proc/kallsyms permission or run as root.\n");
err = perf_event__synthesize_modules(tool, process_synthesized_event,
machine);
WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
"Check /proc/modules permission or run as root.\n");
}
if (perf_guest) {
machines__process_guests(&session->machines,
perf_event__synthesize_guest_os, tool);
}
err = perf_event__synthesize_extra_attr(&rec->tool,
rec->evlist,
process_synthesized_event,
data->is_pipe);
if (err)
goto out;
err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
process_synthesized_event,
NULL);
if (err < 0) {
pr_err("Couldn't synthesize thread map.\n");
return err;
}
err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
process_synthesized_event, NULL);
if (err < 0) {
pr_err("Couldn't synthesize cpu map.\n");
return err;
}
err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
machine, opts);
if (err < 0) {
pr_warning("Couldn't synthesize bpf events.\n");
err = 0;
}
if (rec->opts.synth & PERF_SYNTH_CGROUP) {
err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
machine);
if (err < 0) {
pr_warning("Couldn't synthesize cgroup events.\n");
err = 0;
}
}
if (rec->opts.nr_threads_synthesize > 1) {
mutex_init(&synth_lock);
perf_set_multithreaded();
f = process_locked_synthesized_event;
}
if (rec->opts.synth & PERF_SYNTH_TASK) {
bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
err = __machine__synthesize_threads(machine, tool, &opts->target,
rec->evlist->core.threads,
f, needs_mmap, opts->sample_address,
rec->opts.nr_threads_synthesize);
}
if (rec->opts.nr_threads_synthesize > 1) {
perf_set_singlethreaded();
mutex_destroy(&synth_lock);
}
out:
return err;
}
static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
{
struct record *rec = data;
pthread_kill(rec->thread_id, SIGUSR2);
return 0;
}
static int record__setup_sb_evlist(struct record *rec)
{
struct record_opts *opts = &rec->opts;
if (rec->sb_evlist != NULL) {
/*
* We get here if --switch-output-event populated the
* sb_evlist, so associate a callback that will send a SIGUSR2
* to the main thread.
*/
evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
rec->thread_id = pthread_self();
}
#ifdef HAVE_LIBBPF_SUPPORT
if (!opts->no_bpf_event) {
if (rec->sb_evlist == NULL) {
rec->sb_evlist = evlist__new();
if (rec->sb_evlist == NULL) {
pr_err("Couldn't create side band evlist.\n.");
return -1;
}
}
if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
return -1;
}
}
#endif
if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
opts->no_bpf_event = true;
}
return 0;
}
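/*
 * Take paired gettimeofday()/clock_gettime() readings of the session clock
 * and store them in the header, so report tools can convert perf timestamps
 * to wall-clock time.
 */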
static int record__init_clock(struct record *rec)
{
struct perf_session *session = rec->session;
struct timespec ref_clockid;
struct timeval ref_tod;
u64 ref;
if (!rec->opts.use_clockid)
return 0;
if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
session->header.env.clock.clockid = rec->opts.clockid;
if (gettimeofday(&ref_tod, NULL) != 0) {
pr_err("gettimeofday failed, cannot set reference time.\n");
return -1;
}
if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
pr_err("clock_gettime failed, cannot set reference time.\n");
return -1;
}
ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
(u64) ref_tod.tv_usec * NSEC_PER_USEC;
session->header.env.clock.tod_ns = ref;
ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
(u64) ref_clockid.tv_nsec;
session->header.env.clock.clockid_ns = ref;
return 0;
}
static void hit_auxtrace_snapshot_trigger(struct record *rec)
{
if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
trigger_hit(&auxtrace_snapshot_trigger);
auxtrace_record__snapshot_started = 1;
if (auxtrace_record__snapshot_start(rec->itr))
trigger_error(&auxtrace_snapshot_trigger);
}
}
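/*
 * On systems with more than one core PMU (e.g. hybrid CPUs), qualify each
 * hybrid event name with its PMU, so that e.g. "cycles" becomes
 * "cpu_core/cycles/" and the origin of every event stays unambiguous.
 */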
static void record__uniquify_name(struct record *rec)
{
struct evsel *pos;
struct evlist *evlist = rec->evlist;
char *new_name;
int ret;
if (perf_pmus__num_core_pmus() == 1)
return;
evlist__for_each_entry(evlist, pos) {
if (!evsel__is_hybrid(pos))
continue;
if (strchr(pos->name, '/'))
continue;
ret = asprintf(&new_name, "%s/%s/",
pos->pmu_name, pos->name);
		/* asprintf() returns -1 on failure, leaving new_name undefined */
		if (ret > 0) {
free(pos->name);
pos->name = new_name;
}
}
}
static int record__terminate_thread(struct record_thread *thread_data)
{
int err;
enum thread_msg ack = THREAD_MSG__UNDEFINED;
pid_t tid = thread_data->tid;
close(thread_data->pipes.msg[1]);
thread_data->pipes.msg[1] = -1;
err = read(thread_data->pipes.ack[0], &ack, sizeof(ack));
if (err > 0)
pr_debug2("threads[%d]: sent %s\n", tid, thread_msg_tags[ack]);
else
pr_warning("threads[%d]: failed to receive termination notification from %d\n",
thread->tid, tid);
return 0;
}
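/*
 * Start the nr_threads - 1 auxiliary streaming threads (thread_data[0] runs
 * in the main thread) as detached pthreads pinned to their affinity masks,
 * with all signals blocked so that only the main thread handles them, and
 * wait for each thread's readiness ack.
 */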
static int record__start_threads(struct record *rec)
{
int t, tt, err, ret = 0, nr_threads = rec->nr_threads;
struct record_thread *thread_data = rec->thread_data;
sigset_t full, mask;
pthread_t handle;
pthread_attr_t attrs;
thread = &thread_data[0];
if (!record__threads_enabled(rec))
return 0;
sigfillset(&full);
if (sigprocmask(SIG_SETMASK, &full, &mask)) {
pr_err("Failed to block signals on threads start: %s\n", strerror(errno));
return -1;
}
pthread_attr_init(&attrs);
pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
for (t = 1; t < nr_threads; t++) {
enum thread_msg msg = THREAD_MSG__UNDEFINED;
#ifdef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
pthread_attr_setaffinity_np(&attrs,
MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)),
(cpu_set_t *)(thread_data[t].mask->affinity.bits));
#endif
if (pthread_create(&handle, &attrs, record__thread, &thread_data[t])) {
for (tt = 1; tt < t; tt++)
				record__terminate_thread(&thread_data[tt]);
pr_err("Failed to start threads: %s\n", strerror(errno));
ret = -1;
goto out_err;
}
err = read(thread_data[t].pipes.ack[0], &msg, sizeof(msg));
if (err > 0)
pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid,
thread_msg_tags[msg]);
else
pr_warning("threads[%d]: failed to receive start notification from %d\n",
thread->tid, rec->thread_data[t].tid);
}
sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
(cpu_set_t *)thread->mask->affinity.bits);
pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
out_err:
pthread_attr_destroy(&attrs);
if (sigprocmask(SIG_SETMASK, &mask, NULL)) {
pr_err("Failed to unblock signals on threads start: %s\n", strerror(errno));
ret = -1;
}
return ret;
}
static int record__stop_threads(struct record *rec)
{
int t;
struct record_thread *thread_data = rec->thread_data;
for (t = 1; t < rec->nr_threads; t++)
record__terminate_thread(&thread_data[t]);
for (t = 0; t < rec->nr_threads; t++) {
rec->samples += thread_data[t].samples;
if (!record__threads_enabled(rec))
continue;
rec->session->bytes_transferred += thread_data[t].bytes_transferred;
rec->session->bytes_compressed += thread_data[t].bytes_compressed;
pr_debug("threads[%d]: samples=%lld, wakes=%ld, ", thread_data[t].tid,
thread_data[t].samples, thread_data[t].waking);
if (thread_data[t].bytes_transferred && thread_data[t].bytes_compressed)
pr_debug("transferred=%" PRIu64 ", compressed=%" PRIu64 "\n",
thread_data[t].bytes_transferred, thread_data[t].bytes_compressed);
else
pr_debug("written=%" PRIu64 "\n", thread_data[t].bytes_written);
}
return 0;
}
static unsigned long record__waking(struct record *rec)
{
int t;
unsigned long waking = 0;
struct record_thread *thread_data = rec->thread_data;
for (t = 0; t < rec->nr_threads; t++)
waking += thread_data[t].waking;
return waking;
}
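/*
 * The main body of 'perf record': set up signals, the session, the output
 * header and synthetic events, optionally fork the workload, then loop
 * reading the mmaps until completion, draining or a control command stops
 * the session, and finally write out lost samples and the file header.
 */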
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
int err;
int status = 0;
const bool forks = argc > 0;
struct perf_tool *tool = &rec->tool;
struct record_opts *opts = &rec->opts;
struct perf_data *data = &rec->data;
struct perf_session *session;
bool disabled = false, draining = false;
int fd;
float ratio = 0;
enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
atexit(record__sig_exit);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
signal(SIGSEGV, sigsegv_handler);
if (rec->opts.record_namespaces)
tool->namespace_events = true;
if (rec->opts.record_cgroup) {
#ifdef HAVE_FILE_HANDLE
tool->cgroup_events = true;
#else
pr_err("cgroup tracking is not supported\n");
return -1;
#endif
}
if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
signal(SIGUSR2, snapshot_sig_handler);
if (rec->opts.auxtrace_snapshot_mode)
trigger_on(&auxtrace_snapshot_trigger);
if (rec->switch_output.enabled)
trigger_on(&switch_output_trigger);
} else {
signal(SIGUSR2, SIG_IGN);
}
session = perf_session__new(data, tool);
if (IS_ERR(session)) {
pr_err("Perf session creation failed.\n");
return PTR_ERR(session);
}
if (record__threads_enabled(rec)) {
if (perf_data__is_pipe(&rec->data)) {
pr_err("Parallel trace streaming is not available in pipe mode.\n");
return -1;
}
if (rec->opts.full_auxtrace) {
pr_err("Parallel trace streaming is not available in AUX area tracing mode.\n");
return -1;
}
}
fd = perf_data__fd(data);
rec->session = session;
if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
pr_err("Compression initialization failed.\n");
return -1;
}
#ifdef HAVE_EVENTFD_SUPPORT
done_fd = eventfd(0, EFD_NONBLOCK);
if (done_fd < 0) {
pr_err("Failed to create wakeup eventfd, error: %m\n");
status = -1;
goto out_delete_session;
}
err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
if (err < 0) {
pr_err("Failed to add wakeup eventfd to poll list\n");
status = err;
goto out_delete_session;
}
#endif // HAVE_EVENTFD_SUPPORT
session->header.env.comp_type = PERF_COMP_ZSTD;
session->header.env.comp_level = rec->opts.comp_level;
if (rec->opts.kcore &&
!record__kcore_readable(&session->machines.host)) {
pr_err("ERROR: kcore is not readable.\n");
return -1;
}
if (record__init_clock(rec))
return -1;
record__init_features(rec);
if (forks) {
err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
workload_exec_failed_signal);
if (err < 0) {
pr_err("Couldn't run the workload!\n");
status = err;
goto out_delete_session;
}
}
	/*
	 * If we have just a single event and are sending data
	 * through a pipe, we need to force ID allocation, because
	 * we synthesize the event name through the pipe
	 * and need the ID for that.
	 */
if (data->is_pipe && rec->evlist->core.nr_entries == 1)
rec->opts.sample_id = true;
record__uniquify_name(rec);
/* Debug message used by test scripts */
pr_debug3("perf record opening and mmapping events\n");
if (record__open(rec) != 0) {
err = -1;
goto out_free_threads;
}
/* Debug message used by test scripts */
pr_debug3("perf record done opening and mmapping events\n");
session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
if (rec->opts.kcore) {
err = record__kcore_copy(&session->machines.host, data);
if (err) {
pr_err("ERROR: Failed to copy kcore\n");
goto out_free_threads;
}
}
/*
* Normally perf_session__new would do this, but it doesn't have the
* evlist.
*/
if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
rec->tool.ordered_events = false;
}
if (evlist__nr_groups(rec->evlist) == 0)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
if (data->is_pipe) {
err = perf_header__write_pipe(fd);
if (err < 0)
goto out_free_threads;
} else {
err = perf_session__write_header(session, rec->evlist, fd, false);
if (err < 0)
goto out_free_threads;
}
err = -1;
if (!rec->no_buildid
&& !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
pr_err("Couldn't generate buildids. "
"Use --no-buildid to profile anyway.\n");
goto out_free_threads;
}
err = record__setup_sb_evlist(rec);
if (err)
goto out_free_threads;
err = record__synthesize(rec, false);
if (err < 0)
goto out_free_threads;
if (rec->realtime_prio) {
struct sched_param param;
param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
pr_err("Could not set realtime priority.\n");
err = -1;
goto out_free_threads;
}
}
if (record__start_threads(rec))
goto out_free_threads;
/*
* When perf is starting the traced process, all the events
* (apart from group members) have enable_on_exec=1 set,
* so don't spoil it by prematurely enabling them.
*/
if (!target__none(&opts->target) && !opts->target.initial_delay)
evlist__enable(rec->evlist);
/*
* Let the child rip
*/
if (forks) {
struct machine *machine = &session->machines.host;
union perf_event *event;
pid_t tgid;
event = malloc(sizeof(event->comm) + machine->id_hdr_size);
if (event == NULL) {
err = -ENOMEM;
goto out_child;
}
		/*
		 * Some H/W events are generated before the COMM event,
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize a COMM event to prevent it.
		 */
tgid = perf_event__synthesize_comm(tool, event,
rec->evlist->workload.pid,
process_synthesized_event,
machine);
free(event);
if (tgid == -1)
goto out_child;
event = malloc(sizeof(event->namespaces) +
(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
machine->id_hdr_size);
if (event == NULL) {
err = -ENOMEM;
goto out_child;
}
/*
* Synthesize NAMESPACES event for the command specified.
*/
perf_event__synthesize_namespaces(tool, event,
rec->evlist->workload.pid,
tgid, process_synthesized_event,
machine);
free(event);
evlist__start_workload(rec->evlist);
}
if (opts->target.initial_delay) {
pr_info(EVLIST_DISABLED_MSG);
if (opts->target.initial_delay > 0) {
usleep(opts->target.initial_delay * USEC_PER_MSEC);
evlist__enable(rec->evlist);
pr_info(EVLIST_ENABLED_MSG);
}
}
err = event_enable_timer__start(rec->evlist->eet);
if (err)
goto out_child;
/* Debug message used by test scripts */
pr_debug3("perf record has started\n");
fflush(stderr);
trigger_ready(&auxtrace_snapshot_trigger);
trigger_ready(&switch_output_trigger);
perf_hooks__invoke_record_start();
/*
* Must write FINISHED_INIT so it will be seen after all other
* synthesized user events, but before any regular events.
*/
err = write_finished_init(rec, false);
if (err < 0)
goto out_child;
for (;;) {
unsigned long long hits = thread->samples;
		/*
		 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY
		 * here: when done == true and hits != rec->samples in
		 * the previous round.
		 *
		 * evlist__toggle_bkw_mmap() ensures we never convert
		 * BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
if (trigger_is_hit(&switch_output_trigger) || done || draining)
evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
if (record__mmap_read_all(rec, false) < 0) {
trigger_error(&auxtrace_snapshot_trigger);
trigger_error(&switch_output_trigger);
err = -1;
goto out_child;
}
if (auxtrace_record__snapshot_started) {
auxtrace_record__snapshot_started = 0;
if (!trigger_is_error(&auxtrace_snapshot_trigger))
record__read_auxtrace_snapshot(rec, false);
if (trigger_is_error(&auxtrace_snapshot_trigger)) {
pr_err("AUX area tracing snapshot failed\n");
err = -1;
goto out_child;
}
}
if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in the
			 * overwritable ring buffer should have been collected,
			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 is raised after or during record__mmap_read_all(),
			 * record__mmap_read_all() didn't collect data from the
			 * overwritable ring buffer. Read again.
			 */
if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
continue;
trigger_ready(&switch_output_trigger);
/*
* Reenable events in overwrite ring buffer after
* record__mmap_read_all(): we should have collected
* data from it.
*/
evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
if (!quiet)
fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
record__waking(rec));
thread->waking = 0;
fd = record__switch_output(rec, false);
if (fd < 0) {
pr_err("Failed to switch to new file\n");
trigger_error(&switch_output_trigger);
err = fd;
goto out_child;
}
/* re-arm the alarm */
if (rec->switch_output.time)
alarm(rec->switch_output.time);
}
if (hits == thread->samples) {
if (done || draining)
break;
err = fdarray__poll(&thread->pollfd, -1);
			/*
			 * Only propagate an error if there is one; ignore a
			 * positive number of returned events and EINTR.
			 */
if (err > 0 || (err < 0 && errno == EINTR))
err = 0;
thread->waking++;
if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP,
record__thread_munmap_filtered, NULL) == 0)
draining = true;
err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread);
if (err)
goto out_child;
}
if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
switch (cmd) {
case EVLIST_CTL_CMD_SNAPSHOT:
hit_auxtrace_snapshot_trigger(rec);
evlist__ctlfd_ack(rec->evlist);
break;
case EVLIST_CTL_CMD_STOP:
done = 1;
break;
case EVLIST_CTL_CMD_ACK:
case EVLIST_CTL_CMD_UNSUPPORTED:
case EVLIST_CTL_CMD_ENABLE:
case EVLIST_CTL_CMD_DISABLE:
case EVLIST_CTL_CMD_EVLIST:
case EVLIST_CTL_CMD_PING:
default:
break;
}
}
err = event_enable_timer__process(rec->evlist->eet);
if (err < 0)
goto out_child;
if (err) {
err = 0;
done = 1;
}
		/*
		 * When perf is starting the traced process, the events
		 * die with the process at the end and we wait for that,
		 * so there is no need to disable them in this case.
		 */
if (done && !disabled && !target__none(&opts->target)) {
trigger_off(&auxtrace_snapshot_trigger);
evlist__disable(rec->evlist);
disabled = true;
}
}
trigger_off(&auxtrace_snapshot_trigger);
trigger_off(&switch_output_trigger);
if (opts->auxtrace_snapshot_on_exit)
record__auxtrace_snapshot_exit(rec);
if (forks && workload_exec_errno) {
char msg[STRERR_BUFSIZE], strevsels[2048];
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
strevsels, argv[0], emsg);
err = -1;
goto out_child;
}
if (!quiet)
fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n",
record__waking(rec));
write_finished_init(rec, true);
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, true);
out_child:
record__stop_threads(rec);
record__mmap_read_all(rec, true);
out_free_threads:
record__free_thread_data(rec);
evlist__finalize_ctlfd(rec->evlist);
record__aio_mmap_read_sync(rec);
if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
session->header.env.comp_ratio = ratio + 0.5;
}
if (forks) {
int exit_status;
if (!child_finished)
kill(rec->evlist->workload.pid, SIGTERM);
wait(&exit_status);
if (err < 0)
status = err;
else if (WIFEXITED(exit_status))
status = WEXITSTATUS(exit_status);
else if (WIFSIGNALED(exit_status))
signr = WTERMSIG(exit_status);
} else
status = err;
if (rec->off_cpu)
rec->bytes_written += off_cpu_write(rec->session);
record__read_lost_samples(rec);
record__synthesize(rec, true);
/* this will be recalculated during process_buildids() */
rec->samples = 0;
if (!err) {
if (!rec->timestamp_filename) {
record__finish_output(rec);
} else {
fd = record__switch_output(rec, true);
if (fd < 0) {
status = fd;
goto out_delete_session;
}
}
}
perf_hooks__invoke_record_end();
if (!err && !quiet) {
char samples[128];
const char *postfix = rec->timestamp_filename ?
".<timestamp>" : "";
if (rec->samples && !rec->opts.full_auxtrace)
scnprintf(samples, sizeof(samples),
" (%" PRIu64 " samples)", rec->samples);
else
samples[0] = '\0';
fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
perf_data__size(data) / 1024.0 / 1024.0,
data->path, postfix, samples);
if (ratio) {
fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
rec->session->bytes_transferred / 1024.0 / 1024.0,
ratio);
}
fprintf(stderr, " ]\n");
}
out_delete_session:
#ifdef HAVE_EVENTFD_SUPPORT
if (done_fd >= 0) {
fd = done_fd;
done_fd = -1;
close(fd);
}
#endif
zstd_fini(&session->zstd_data);
perf_session__delete(session);
if (!opts->no_bpf_event)
evlist__stop_sb_thread(rec->sb_evlist);
return status;
}
static void callchain_debug(struct callchain_param *callchain)
{
static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
pr_debug("callchain: type %s\n", str[callchain->record_mode]);
if (callchain->record_mode == CALLCHAIN_DWARF)
pr_debug("callchain: stack dump size %d\n",
callchain->dump_size);
}
int record_opts__parse_callchain(struct record_opts *record,
struct callchain_param *callchain,
const char *arg, bool unset)
{
int ret;
callchain->enabled = !unset;
/* --no-call-graph */
if (unset) {
callchain->record_mode = CALLCHAIN_NONE;
pr_debug("callchain: disabled\n");
return 0;
}
ret = parse_callchain_record_opt(arg, callchain);
if (!ret) {
/* Enable data address sampling for DWARF unwind. */
if (callchain->record_mode == CALLCHAIN_DWARF)
record->sample_address = true;
callchain_debug(callchain);
}
return ret;
}
int record_parse_callchain_opt(const struct option *opt,
const char *arg,
int unset)
{
return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}
int record_callchain_opt(const struct option *opt,
const char *arg __maybe_unused,
int unset __maybe_unused)
{
struct callchain_param *callchain = opt->value;
callchain->enabled = true;
if (callchain->record_mode == CALLCHAIN_NONE)
callchain->record_mode = CALLCHAIN_FP;
callchain_debug(callchain);
return 0;
}
static int perf_record_config(const char *var, const char *value, void *cb)
{
struct record *rec = cb;
if (!strcmp(var, "record.build-id")) {
if (!strcmp(value, "cache"))
rec->no_buildid_cache = false;
else if (!strcmp(value, "no-cache"))
rec->no_buildid_cache = true;
else if (!strcmp(value, "skip"))
rec->no_buildid = true;
else if (!strcmp(value, "mmap"))
rec->buildid_mmap = true;
else
return -1;
return 0;
}
if (!strcmp(var, "record.call-graph")) {
var = "call-graph.record-mode";
return perf_default_config(var, value, cb);
}
#ifdef HAVE_AIO_SUPPORT
if (!strcmp(var, "record.aio")) {
rec->opts.nr_cblocks = strtol(value, NULL, 0);
if (!rec->opts.nr_cblocks)
rec->opts.nr_cblocks = nr_cblocks_default;
}
#endif
if (!strcmp(var, "record.debuginfod")) {
rec->debuginfod.urls = strdup(value);
if (!rec->debuginfod.urls)
return -ENOMEM;
rec->debuginfod.set = true;
}
return 0;
}
static int record__parse_event_enable_time(const struct option *opt, const char *str, int unset)
{
struct record *rec = (struct record *)opt->value;
return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset);
}
static int record__parse_affinity(const struct option *opt, const char *str, int unset)
{
struct record_opts *opts = (struct record_opts *)opt->value;
if (unset || !str)
return 0;
if (!strcasecmp(str, "node"))
opts->affinity = PERF_AFFINITY_NODE;
else if (!strcasecmp(str, "cpu"))
opts->affinity = PERF_AFFINITY_CPU;
return 0;
}
static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
{
mask->nbits = nr_bits;
mask->bits = bitmap_zalloc(mask->nbits);
if (!mask->bits)
return -ENOMEM;
return 0;
}
static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
{
bitmap_free(mask->bits);
mask->nbits = 0;
}
static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
{
int ret;
ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
if (ret) {
mask->affinity.bits = NULL;
return ret;
}
ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
if (ret) {
record__mmap_cpu_mask_free(&mask->maps);
mask->maps.bits = NULL;
}
return ret;
}
static void record__thread_mask_free(struct thread_mask *mask)
{
record__mmap_cpu_mask_free(&mask->maps);
record__mmap_cpu_mask_free(&mask->affinity);
}
static int record__parse_threads(const struct option *opt, const char *str, int unset)
{
int s;
struct record_opts *opts = opt->value;
if (unset || !str || !strlen(str)) {
opts->threads_spec = THREAD_SPEC__CPU;
} else {
for (s = 1; s < THREAD_SPEC__MAX; s++) {
if (s == THREAD_SPEC__USER) {
opts->threads_user_spec = strdup(str);
if (!opts->threads_user_spec)
return -ENOMEM;
opts->threads_spec = THREAD_SPEC__USER;
break;
}
if (!strncasecmp(str, thread_spec_tags[s], strlen(thread_spec_tags[s]))) {
opts->threads_spec = s;
break;
}
}
}
if (opts->threads_spec == THREAD_SPEC__USER)
pr_debug("threads_spec: %s\n", opts->threads_user_spec);
else
pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]);
return 0;
}
static int parse_output_max_size(const struct option *opt,
const char *str, int unset)
{
unsigned long *s = (unsigned long *)opt->value;
static struct parse_tag tags_size[] = {
{ .tag = 'B', .mult = 1 },
{ .tag = 'K', .mult = 1 << 10 },
{ .tag = 'M', .mult = 1 << 20 },
{ .tag = 'G', .mult = 1 << 30 },
{ .tag = 0 },
};
unsigned long val;
if (unset) {
*s = 0;
return 0;
}
val = parse_tag_value(str, tags_size);
if (val != (unsigned long) -1) {
*s = val;
return 0;
}
return -1;
}
static int record__parse_mmap_pages(const struct option *opt,
const char *str,
int unset __maybe_unused)
{
struct record_opts *opts = opt->value;
char *s, *p;
unsigned int mmap_pages;
int ret;
if (!str)
return -EINVAL;
s = strdup(str);
if (!s)
return -ENOMEM;
p = strchr(s, ',');
if (p)
*p = '\0';
if (*s) {
ret = __evlist__parse_mmap_pages(&mmap_pages, s);
if (ret)
goto out_free;
opts->mmap_pages = mmap_pages;
}
if (!p) {
ret = 0;
goto out_free;
}
ret = __evlist__parse_mmap_pages(&mmap_pages, p + 1);
if (ret)
goto out_free;
opts->auxtrace_mmap_pages = mmap_pages;
out_free:
free(s);
return ret;
}
void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused)
{
}
static int parse_control_option(const struct option *opt,
const char *str,
int unset __maybe_unused)
{
struct record_opts *opts = opt->value;
return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close);
}
static void switch_output_size_warn(struct record *rec)
{
u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
struct switch_output *s = &rec->switch_output;
wakeup_size /= 2;
if (s->size < wakeup_size) {
char buf[100];
unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
pr_warning("WARNING: switch-output data size lower than "
"wakeup kernel buffer size (%s) "
"expect bigger perf.data sizes\n", buf);
}
}
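/*
 * Parse the --switch-output argument: "signal" rotates on SIGUSR2, a
 * size[BKMG] rotates when the output grows past the threshold, and a
 * time[smhd] rotates periodically. Any of them implies timestamped
 * output file names.
 */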
static int switch_output_setup(struct record *rec)
{
struct switch_output *s = &rec->switch_output;
static struct parse_tag tags_size[] = {
{ .tag = 'B', .mult = 1 },
{ .tag = 'K', .mult = 1 << 10 },
{ .tag = 'M', .mult = 1 << 20 },
{ .tag = 'G', .mult = 1 << 30 },
{ .tag = 0 },
};
static struct parse_tag tags_time[] = {
{ .tag = 's', .mult = 1 },
{ .tag = 'm', .mult = 60 },
{ .tag = 'h', .mult = 60*60 },
{ .tag = 'd', .mult = 60*60*24 },
{ .tag = 0 },
};
unsigned long val;
	/*
	 * If we're using --switch-output-event, then we imply
	 * --switch-output=signal, as we'll send a SIGUSR2 from the side band
	 * thread to its parent.
	 */
if (rec->switch_output_event_set) {
if (record__threads_enabled(rec)) {
pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n");
return 0;
}
goto do_signal;
}
if (!s->set)
return 0;
if (record__threads_enabled(rec)) {
pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n");
return 0;
}
if (!strcmp(s->str, "signal")) {
do_signal:
s->signal = true;
pr_debug("switch-output with SIGUSR2 signal\n");
goto enabled;
}
val = parse_tag_value(s->str, tags_size);
if (val != (unsigned long) -1) {
s->size = val;
pr_debug("switch-output with %s size threshold\n", s->str);
goto enabled;
}
val = parse_tag_value(s->str, tags_time);
if (val != (unsigned long) -1) {
s->time = val;
pr_debug("switch-output with %s time threshold (%lu seconds)\n",
s->str, s->time);
goto enabled;
}
return -1;
enabled:
rec->timestamp_filename = true;
s->enabled = true;
if (s->size && !rec->opts.no_buffering)
switch_output_size_warn(rec);
return 0;
}
static const char * const __record_usage[] = {
"perf record [<options>] [<command>]",
"perf record [<options>] -- <command> [<options>]",
NULL
};
const char * const *record_usage = __record_usage;
static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine)
{
	/*
	 * We already have the kernel maps, put in place via
	 * perf_session__create_kernel_maps(), so there is no need
	 * to add them twice.
	 */
if (!(event->header.misc & PERF_RECORD_MISC_USER))
return 0;
return perf_event__process_mmap(tool, event, sample, machine);
}
static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine)
{
	/*
	 * We already have the kernel maps, put in place via
	 * perf_session__create_kernel_maps(), so there is no need
	 * to add them twice.
	 */
if (!(event->header.misc & PERF_RECORD_MISC_USER))
return 0;
return perf_event__process_mmap2(tool, event, sample, machine);
}
static int process_timestamp_boundary(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
struct record *rec = container_of(tool, struct record, tool);
set_timestamp_boundary(rec, sample->time);
return 0;
}
static int parse_record_synth_option(const struct option *opt,
const char *str,
int unset __maybe_unused)
{
struct record_opts *opts = opt->value;
char *p = strdup(str);
if (p == NULL)
return -1;
opts->synth = parse_synth_opt(p);
free(p);
if (opts->synth < 0) {
pr_err("Invalid synth option: %s\n", str);
return -1;
}
return 0;
}
/*
 * XXX Ideally this would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
.opts = {
.sample_time = true,
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.freq = 4000,
.target = {
.uses_mmap = true,
.default_per_cpu = true,
},
.mmap_flush = MMAP_FLUSH_DEFAULT,
.nr_threads_synthesize = 1,
.ctl_fd = -1,
.ctl_fd_ack = -1,
.synth = PERF_SYNTH_ALL,
},
.tool = {
.sample = process_sample_event,
.fork = perf_event__process_fork,
.exit = perf_event__process_exit,
.comm = perf_event__process_comm,
.namespaces = perf_event__process_namespaces,
.mmap = build_id__process_mmap,
.mmap2 = build_id__process_mmap2,
.itrace_start = process_timestamp_boundary,
.aux = process_timestamp_boundary,
.ordered_events = true,
},
};
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
"\n\t\t\t\tDefault: fp";
static bool dry_run;
static struct parse_events_option_args parse_events_option_args = {
.evlistp = &record.evlist,
};
static struct parse_events_option_args switch_output_parse_events_option_args = {
.evlistp = &record.sb_evlist,
};
/*
* XXX Will stay a global variable till we fix builtin-script.c to stop messing
* with it and switch to use the library functions in perf_evlist that came
* from builtin-record.c, i.e. use record_opts,
* evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
* using pipes, etc.
*/
static struct option __record_options[] = {
OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
OPT_CALLBACK(0, "filter", &record.evlist, "filter",
"event filter", parse_filter),
OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
NULL, "don't record events from perf itself",
exclude_perf),
OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
"record events on existing process id"),
OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
"record events on existing thread id"),
OPT_INTEGER('r', "realtime", &record.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
"collect data without buffering"),
OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
"collect raw sample records from all opened counters"),
OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
OPT_STRING('o', "output", &record.data.path, "file",
"output file name"),
OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
&record.opts.no_inherit_set,
"child tasks do not inherit counters"),
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
"profile at this frequency",
record__parse_freq),
OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
"number of mmap data pages and AUX area tracing mmap pages",
record__parse_mmap_pages),
OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
"Minimal number of bytes that is extracted from mmap data pages (default: 1)",
record__mmap_flush_parse),
OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
NULL, "enables call-graph recording" ,
&record_callchain_opt),
OPT_CALLBACK(0, "call-graph", &record.opts,
"record_mode[,record_size]", record_callchain_help,
&record_parse_callchain_opt),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "don't print any warnings or messages"),
OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
"per thread counts"),
OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
"Record the sample physical addresses"),
OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
"Record the sampled data address data page size"),
OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
"Record the sampled code address (ip) page size"),
OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
"Record the sample identifier"),
OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
&record.opts.sample_time_set,
"Record the sample timestamps"),
OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
"Record the sample period"),
OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
"don't sample"),
OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
&record.no_buildid_cache_set,
"do not update the buildid cache"),
OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
&record.no_buildid_set,
"do not collect buildids in perf.data"),
OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
"monitor event in cgroup name only",
parse_cgroups),
OPT_CALLBACK('D', "delay", &record, "ms",
"ms to wait before starting measurement after program start (-1: start with events disabled), "
"or ranges of time to enable events e.g. '-D 10-20,30-40'",
record__parse_event_enable_time),
OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
"user to profile"),
OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
"branch any", "sample any taken branches",
parse_branch_stack),
OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
"branch filter mask", "branch stack filter modes",
parse_branch_stack),
OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
"sample by weight (on special events only)"),
OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
"sample transaction flags (special events only)"),
OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
"use per-thread mmaps"),
OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
"sample selected machine registers on interrupt,"
" use '-I?' to list register names", parse_intr_regs),
OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
"sample selected machine registers on interrupt,"
" use '--user-regs=?' to list register names", parse_user_regs),
OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
"Record running/enabled time of read (:S) events"),
OPT_CALLBACK('k', "clockid", &record.opts,
"clockid", "clockid to use for events, see clock_gettime()",
parse_clockid),
OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
"opts", "AUX area tracing Snapshot Mode", ""),
OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
"opts", "sample AUX area", ""),
OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
"Record namespaces events"),
OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
"Record cgroup events"),
OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
&record.opts.record_switch_events_set,
"Record context switch events"),
OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
"Configure all used events to run in kernel space.",
PARSE_OPT_EXCLUSIVE),
OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
"Configure all used events to run in user space.",
PARSE_OPT_EXCLUSIVE),
OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
"collect kernel callchains"),
OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
"collect user callchains"),
OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
"Record build-id of all DSOs regardless of hits"),
OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
"Record build-id in map events"),
OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
"append timestamp to output filename"),
OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
"Record timestamp boundary (time of first/last samples)"),
OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
&record.switch_output.set, "signal or size[BKMG] or time[smhd]",
"Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
"signal"),
OPT_CALLBACK_SET(0, "switch-output-event", &switch_output_parse_events_option_args,
&record.switch_output_event_set, "switch output event",
"switch output event selector. use 'perf list' to list available events",
parse_events_option_new_evlist),
OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
"Limit number of switch output generated files"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
"Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
&nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
record__aio_parse),
#endif
OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
"Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
record__parse_affinity),
#ifdef HAVE_ZSTD_SUPPORT
OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n",
"Compress records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
record__parse_comp_level),
#endif
OPT_CALLBACK(0, "max-size", &record.output_max_size,
"size", "Limit the maximum size of the output file", parse_output_max_size),
OPT_UINTEGER(0, "num-thread-synthesize",
&record.opts.nr_threads_synthesize,
"number of threads to run for event synthesis"),
#ifdef HAVE_LIBPFM
OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
"libpfm4 event selector. use 'perf list' to list available events",
parse_libpfm_events_option),
#endif
OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
"Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events,\n"
"\t\t\t 'snapshot': AUX area tracing snapshot).\n"
"\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
"\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
parse_control_option),
OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup",
"Fine-tune event synthesis: default=all", parse_record_synth_option),
OPT_STRING_OPTARG_SET(0, "debuginfod", &record.debuginfod.urls,
&record.debuginfod.set, "debuginfod urls",
"Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls",
"system"),
OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec",
"write collected trace data into several data files using parallel threads",
record__parse_threads),
OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
OPT_END()
};
struct option *record_options = __record_options;
static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
{
struct perf_cpu cpu;
int idx;
if (cpu_map__is_dummy(cpus))
return 0;
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
if (cpu.cpu == -1)
continue;
		/* Return -ENODEV if the input cpu is greater than the max cpu */
if ((unsigned long)cpu.cpu > mask->nbits)
return -ENODEV;
__set_bit(cpu.cpu, mask->bits);
}
return 0;
}
static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
{
struct perf_cpu_map *cpus;
cpus = perf_cpu_map__new(mask_spec);
if (!cpus)
return -ENOMEM;
bitmap_zero(mask->bits, mask->nbits);
if (record__mmap_cpu_mask_init(mask, cpus))
return -ENODEV;
perf_cpu_map__put(cpus);
return 0;
}
static void record__free_thread_masks(struct record *rec, int nr_threads)
{
int t;
if (rec->thread_masks)
for (t = 0; t < nr_threads; t++)
record__thread_mask_free(&rec->thread_masks[t]);
zfree(&rec->thread_masks);
}
static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
{
int t, ret;
rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
if (!rec->thread_masks) {
pr_err("Failed to allocate thread masks\n");
return -ENOMEM;
}
for (t = 0; t < nr_threads; t++) {
ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
if (ret) {
pr_err("Failed to allocate thread masks[%d]\n", t);
goto out_free;
}
}
return 0;
out_free:
record__free_thread_masks(rec, nr_threads);
return ret;
}
static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus)
{
int t, ret, nr_cpus = perf_cpu_map__nr(cpus);
ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
if (ret)
return ret;
rec->nr_threads = nr_cpus;
pr_debug("nr_threads: %d\n", rec->nr_threads);
for (t = 0; t < rec->nr_threads; t++) {
__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
if (verbose > 0) {
pr_debug("thread_masks[%d]: ", t);
mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
pr_debug("thread_masks[%d]: ", t);
mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
}
}
return 0;
}
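/*
 * Build one maps/affinity mask pair per spec string, clamping each mask to
 * the CPUs actually being monitored and rejecting specs that end up empty
 * or that overlap a previously accepted mask.
 */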
static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
const char **maps_spec, const char **affinity_spec,
u32 nr_spec)
{
u32 s;
int ret = 0, t = 0;
struct mmap_cpu_mask cpus_mask;
struct thread_mask thread_mask, full_mask, *thread_masks;
ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu().cpu);
if (ret) {
pr_err("Failed to allocate CPUs mask\n");
return ret;
}
ret = record__mmap_cpu_mask_init(&cpus_mask, cpus);
if (ret) {
pr_err("Failed to init cpu mask\n");
goto out_free_cpu_mask;
}
ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
if (ret) {
pr_err("Failed to allocate full mask\n");
goto out_free_cpu_mask;
}
ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
if (ret) {
pr_err("Failed to allocate thread mask\n");
goto out_free_full_and_cpu_masks;
}
for (s = 0; s < nr_spec; s++) {
ret = record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]);
if (ret) {
pr_err("Failed to initialize maps thread mask\n");
goto out_free;
}
ret = record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]);
if (ret) {
pr_err("Failed to initialize affinity thread mask\n");
goto out_free;
}
/* ignore invalid CPUs but do not allow empty masks */
if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits,
cpus_mask.bits, thread_mask.maps.nbits)) {
pr_err("Empty maps mask: %s\n", maps_spec[s]);
ret = -EINVAL;
goto out_free;
}
if (!bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,
cpus_mask.bits, thread_mask.affinity.nbits)) {
pr_err("Empty affinity mask: %s\n", affinity_spec[s]);
ret = -EINVAL;
goto out_free;
}
/* do not allow intersection with other masks (full_mask) */
if (bitmap_intersects(thread_mask.maps.bits, full_mask.maps.bits,
thread_mask.maps.nbits)) {
pr_err("Intersecting maps mask: %s\n", maps_spec[s]);
ret = -EINVAL;
goto out_free;
}
if (bitmap_intersects(thread_mask.affinity.bits, full_mask.affinity.bits,
thread_mask.affinity.nbits)) {
pr_err("Intersecting affinity mask: %s\n", affinity_spec[s]);
ret = -EINVAL;
goto out_free;
}
bitmap_or(full_mask.maps.bits, full_mask.maps.bits,
thread_mask.maps.bits, full_mask.maps.nbits);
		bitmap_or(full_mask.affinity.bits, full_mask.affinity.bits,
			  thread_mask.affinity.bits, full_mask.affinity.nbits);
thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
if (!thread_masks) {
pr_err("Failed to reallocate thread masks\n");
ret = -ENOMEM;
goto out_free;
}
rec->thread_masks = thread_masks;
rec->thread_masks[t] = thread_mask;
if (verbose > 0) {
pr_debug("thread_masks[%d]: ", t);
mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
pr_debug("thread_masks[%d]: ", t);
mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
}
t++;
ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
if (ret) {
pr_err("Failed to allocate thread mask\n");
goto out_free_full_and_cpu_masks;
}
}
rec->nr_threads = t;
pr_debug("nr_threads: %d\n", rec->nr_threads);
if (!rec->nr_threads)
ret = -EINVAL;
out_free:
record__thread_mask_free(&thread_mask);
out_free_full_and_cpu_masks:
record__thread_mask_free(&full_mask);
out_free_cpu_mask:
record__mmap_cpu_mask_free(&cpus_mask);
return ret;
}
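/*
 * The core/package/NUMA helpers below all reuse
 * record__init_thread_masks_spec() with CPU list strings taken from the
 * system topology, passing the same list for both the maps and the
 * affinity mask of each thread.
 */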
static int record__init_thread_core_masks(struct record *rec, struct perf_cpu_map *cpus)
{
int ret;
struct cpu_topology *topo;
topo = cpu_topology__new();
if (!topo) {
pr_err("Failed to allocate CPU topology\n");
return -ENOMEM;
}
ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list,
topo->core_cpus_list, topo->core_cpus_lists);
cpu_topology__delete(topo);
return ret;
}
static int record__init_thread_package_masks(struct record *rec, struct perf_cpu_map *cpus)
{
int ret;
struct cpu_topology *topo;
topo = cpu_topology__new();
if (!topo) {
pr_err("Failed to allocate CPU topology\n");
return -ENOMEM;
}
ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list,
topo->package_cpus_list, topo->package_cpus_lists);
cpu_topology__delete(topo);
return ret;
}
static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_map *cpus)
{
u32 s;
int ret;
const char **spec;
struct numa_topology *topo;
topo = numa_topology__new();
if (!topo) {
pr_err("Failed to allocate NUMA topology\n");
return -ENOMEM;
}
spec = zalloc(topo->nr * sizeof(char *));
if (!spec) {
pr_err("Failed to allocate NUMA spec\n");
ret = -ENOMEM;
goto out_delete_topo;
}
for (s = 0; s < topo->nr; s++)
spec[s] = topo->nodes[s].cpus;
ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
zfree(&spec);
out_delete_topo:
numa_topology__delete(topo);
return ret;
}
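/*
 * Parse the user-provided --threads specification: pairs of
 * <maps mask>/<affinity mask> separated by ':'. Each pair is duplicated
 * into the maps_spec/affinity_spec arrays that are then handed to
 * record__init_thread_masks_spec().
 */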
static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus)
{
int t, ret;
u32 s, nr_spec = 0;
char **maps_spec = NULL, **affinity_spec = NULL, **tmp_spec;
char *user_spec, *spec, *spec_ptr, *mask, *mask_ptr, *dup_mask = NULL;
for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
spec = strtok_r(user_spec, ":", &spec_ptr);
if (spec == NULL)
break;
pr_debug2("threads_spec[%d]: %s\n", t, spec);
mask = strtok_r(spec, "/", &mask_ptr);
if (mask == NULL)
break;
pr_debug2(" maps mask: %s\n", mask);
tmp_spec = realloc(maps_spec, (nr_spec + 1) * sizeof(char *));
if (!tmp_spec) {
pr_err("Failed to reallocate maps spec\n");
ret = -ENOMEM;
goto out_free;
}
maps_spec = tmp_spec;
maps_spec[nr_spec] = dup_mask = strdup(mask);
if (!maps_spec[nr_spec]) {
pr_err("Failed to allocate maps spec[%d]\n", nr_spec);
ret = -ENOMEM;
goto out_free;
}
mask = strtok_r(NULL, "/", &mask_ptr);
if (mask == NULL) {
pr_err("Invalid thread maps or affinity specs\n");
ret = -EINVAL;
goto out_free;
}
pr_debug2(" affinity mask: %s\n", mask);
tmp_spec = realloc(affinity_spec, (nr_spec + 1) * sizeof(char *));
if (!tmp_spec) {
pr_err("Failed to reallocate affinity spec\n");
ret = -ENOMEM;
goto out_free;
}
affinity_spec = tmp_spec;
affinity_spec[nr_spec] = strdup(mask);
if (!affinity_spec[nr_spec]) {
pr_err("Failed to allocate affinity spec[%d]\n", nr_spec);
ret = -ENOMEM;
goto out_free;
}
dup_mask = NULL;
nr_spec++;
}
ret = record__init_thread_masks_spec(rec, cpus, (const char **)maps_spec,
(const char **)affinity_spec, nr_spec);
out_free:
free(dup_mask);
for (s = 0; s < nr_spec; s++) {
if (maps_spec)
free(maps_spec[s]);
if (affinity_spec)
free(affinity_spec[s]);
}
free(affinity_spec);
free(maps_spec);
return ret;
}
static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
{
int ret;
ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
if (ret)
return ret;
if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
return -ENODEV;
rec->nr_threads = 1;
return 0;
}
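/*
 * Choose how the data streaming threads are laid out: a single thread
 * covering all CPUs by default, or one thread per CPU, core, package,
 * NUMA node or user-supplied mask pair when parallel streaming
 * (--threads) is enabled.
 */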
static int record__init_thread_masks(struct record *rec)
{
int ret = 0;
struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
if (!record__threads_enabled(rec))
return record__init_thread_default_masks(rec, cpus);
if (evlist__per_thread(rec->evlist)) {
pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
return -EINVAL;
}
switch (rec->opts.threads_spec) {
case THREAD_SPEC__CPU:
ret = record__init_thread_cpu_masks(rec, cpus);
break;
case THREAD_SPEC__CORE:
ret = record__init_thread_core_masks(rec, cpus);
break;
case THREAD_SPEC__PACKAGE:
ret = record__init_thread_package_masks(rec, cpus);
break;
case THREAD_SPEC__NUMA:
ret = record__init_thread_numa_masks(rec, cpus);
break;
case THREAD_SPEC__USER:
ret = record__init_thread_user_masks(rec, cpus);
break;
default:
break;
}
return ret;
}
int cmd_record(int argc, const char **argv)
{
int err;
struct record *rec = &record;
char errbuf[BUFSIZ];
setlocale(LC_ALL, "");
#ifndef HAVE_BPF_SKEL
# define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c)
set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
# undef set_nobuild
#endif
rec->opts.affinity = PERF_AFFINITY_SYS;
rec->evlist = evlist__new();
if (rec->evlist == NULL)
return -ENOMEM;
err = perf_config(perf_record_config, rec);
if (err)
return err;
argc = parse_options(argc, argv, record_options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (quiet)
perf_quiet_option();
err = symbol__validate_sym_arguments();
if (err)
return err;
perf_debuginfod_setup(&record.debuginfod);
/* Make system wide (-a) the default target. */
if (!argc && target__none(&rec->opts.target))
rec->opts.target.system_wide = true;
if (nr_cgroups && !rec->opts.target.system_wide) {
usage_with_options_msg(record_usage, record_options,
"cgroup monitoring only available in system-wide mode");
}
if (rec->buildid_mmap) {
if (!perf_can_record_build_id()) {
pr_err("Failed: no support to record build id in mmap events, update your kernel.\n");
err = -EINVAL;
goto out_opts;
}
pr_debug("Enabling build id in mmap2 events.\n");
/* Enable mmap build id synthesizing. */
symbol_conf.buildid_mmap2 = true;
/* Enable perf_event_attr::build_id bit. */
rec->opts.build_id = true;
/* Disable build id cache. */
rec->no_buildid = true;
}
if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
pr_err("Kernel has no cgroup sampling support.\n");
err = -EINVAL;
goto out_opts;
}
if (rec->opts.kcore)
rec->opts.text_poke = true;
if (rec->opts.kcore || record__threads_enabled(rec))
rec->data.is_dir = true;
if (record__threads_enabled(rec)) {
if (rec->opts.affinity != PERF_AFFINITY_SYS) {
pr_err("--affinity option is mutually exclusive to parallel streaming mode.\n");
err = -EINVAL;
goto out_opts;
}
if (record__aio_enabled(rec)) {
pr_err("Asynchronous streaming mode (--aio) is mutually exclusive to parallel streaming mode.\n");
err = -EINVAL;
goto out_opts;
}
}
if (rec->opts.comp_level != 0) {
pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
rec->no_buildid = true;
}
if (rec->opts.record_switch_events &&
!perf_can_record_switch_events()) {
ui__error("kernel does not support recording context switch events\n");
parse_options_usage(record_usage, record_options, "switch-events", 0);
err = -EINVAL;
goto out_opts;
}
if (switch_output_setup(rec)) {
parse_options_usage(record_usage, record_options, "switch-output", 0);
err = -EINVAL;
goto out_opts;
}
if (rec->switch_output.time) {
signal(SIGALRM, alarm_sig_handler);
alarm(rec->switch_output.time);
}
if (rec->switch_output.num_files) {
rec->switch_output.filenames = calloc(rec->switch_output.num_files,
sizeof(char *));
if (!rec->switch_output.filenames) {
err = -ENOMEM;
goto out_opts;
}
}
if (rec->timestamp_filename && record__threads_enabled(rec)) {
rec->timestamp_filename = false;
pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");
}
/*
* Allow aliases to facilitate the lookup of symbols for address
* filters. Refer to auxtrace_parse_filters().
*/
symbol_conf.allow_aliases = true;
symbol__init(NULL);
err = record__auxtrace_init(rec);
if (err)
goto out;
if (dry_run)
goto out;
err = -ENOMEM;
if (rec->no_buildid_cache || rec->no_buildid) {
disable_buildid_cache();
} else if (rec->switch_output.enabled) {
/*
* In 'perf record --switch-output', disable buildid
* generation by default to reduce data file switching
* overhead. Still generate buildids if they are explicitly
* required using
*
* perf record --switch-output --no-no-buildid \
* --no-no-buildid-cache
*
* Following code equals to:
*
* if ((rec->no_buildid || !rec->no_buildid_set) &&
* (rec->no_buildid_cache || !rec->no_buildid_cache_set))
* disable_buildid_cache();
*/
bool disable = true;
if (rec->no_buildid_set && !rec->no_buildid)
disable = false;
if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
disable = false;
if (disable) {
rec->no_buildid = true;
rec->no_buildid_cache = true;
disable_buildid_cache();
}
}
if (record.opts.overwrite)
record.opts.tail_synthesize = true;
if (rec->evlist->core.nr_entries == 0) {
bool can_profile_kernel = perf_event_paranoid_check(1);
err = parse_event(rec->evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
if (err)
goto out;
}
if (rec->opts.target.tid && !rec->opts.no_inherit_set)
rec->opts.no_inherit = true;
err = target__validate(&rec->opts.target);
if (err) {
target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
ui__warning("%s\n", errbuf);
}
err = target__parse_uid(&rec->opts.target);
if (err) {
int saved_errno = errno;
target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
ui__error("%s", errbuf);
err = -saved_errno;
goto out;
}
/* Enable ignoring missing threads when -u/-p option is defined. */
rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
arch__add_leaf_frame_record_opts(&rec->opts);
err = -ENOMEM;
if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
if (rec->opts.target.pid != NULL) {
pr_err("Couldn't create thread/CPU maps: %s\n",
errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
goto out;
}
else
usage_with_options(record_usage, record_options);
}
err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
if (err)
goto out;
/*
* We take all buildids when the file contains
* AUX area tracing data because we do not decode the
* trace, as that would take too long.
*/
if (rec->opts.full_auxtrace)
rec->buildid_all = true;
if (rec->opts.text_poke) {
err = record__config_text_poke(rec->evlist);
if (err) {
pr_err("record__config_text_poke failed, error %d\n", err);
goto out;
}
}
if (rec->off_cpu) {
err = record__config_off_cpu(rec);
if (err) {
pr_err("record__config_off_cpu failed, error %d\n", err);
goto out;
}
}
if (record_opts__config(&rec->opts)) {
err = -EINVAL;
goto out;
}
err = record__init_thread_masks(rec);
if (err) {
pr_err("Failed to initialize parallel data streaming masks\n");
goto out;
}
if (rec->opts.nr_cblocks > nr_cblocks_max)
rec->opts.nr_cblocks = nr_cblocks_max;
pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
if (rec->opts.comp_level > comp_level_max)
rec->opts.comp_level = comp_level_max;
pr_debug("comp level: %d\n", rec->opts.comp_level);
err = __cmd_record(&record, argc, argv);
out:
evlist__delete(rec->evlist);
symbol__exit();
auxtrace_record__free(rec->itr);
out_opts:
record__free_thread_masks(rec, rec->nr_threads);
rec->nr_threads = 0;
evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
return err;
}
static void snapshot_sig_handler(int sig __maybe_unused)
{
struct record *rec = &record;
hit_auxtrace_snapshot_trigger(rec);
if (switch_output_signal(rec))
trigger_hit(&switch_output_trigger);
}
static void alarm_sig_handler(int sig __maybe_unused)
{
struct record *rec = &record;
if (switch_output_time(rec))
trigger_hit(&switch_output_trigger);
}
| linux-master | tools/perf/builtin-record.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-kallsyms.c
*
* Builtin command: Look for a symbol in the running kernel and its modules
*
* Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include <inttypes.h>
#include "builtin.h"
#include <linux/compiler.h>
#include <subcmd/parse-options.h>
#include "debug.h"
#include "dso.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
static int __cmd_kallsyms(int argc, const char **argv)
{
int i;
struct machine *machine = machine__new_kallsyms();
if (machine == NULL) {
pr_err("Couldn't read /proc/kallsyms\n");
return -1;
}
for (i = 0; i < argc; ++i) {
struct map *map;
const struct dso *dso;
struct symbol *symbol = machine__find_kernel_symbol_by_name(machine, argv[i], &map);
if (symbol == NULL) {
printf("%s: not found\n", argv[i]);
continue;
}
dso = map__dso(map);
printf("%s: %s %s %#" PRIx64 "-%#" PRIx64 " (%#" PRIx64 "-%#" PRIx64")\n",
symbol->name, dso->short_name, dso->long_name,
map__unmap_ip(map, symbol->start), map__unmap_ip(map, symbol->end),
symbol->start, symbol->end);
}
machine__delete(machine);
return 0;
}
int cmd_kallsyms(int argc, const char **argv)
{
const struct option options[] = {
OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"),
OPT_END()
};
const char * const kallsyms_usage[] = {
"perf kallsyms [<options>] symbol_name",
NULL
};
argc = parse_options(argc, argv, options, kallsyms_usage, 0);
if (argc < 1)
usage_with_options(kallsyms_usage, options);
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
if (symbol__init(NULL) < 0)
return -1;
return __cmd_kallsyms(argc, argv);
}
| linux-master | tools/perf/builtin-kallsyms.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Builtin evlist command: Show the list of event selectors present
* in a perf.data file.
*/
#include "builtin.h"
#include <linux/list.h>
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/parse-events.h"
#include <subcmd/parse-options.h>
#include "util/session.h"
#include "util/data.h"
#include "util/debug.h"
#include <linux/err.h>
#include "util/tool.h"
#include "util/util.h"
static int process_header_feature(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
session_done = 1;
return 0;
}
static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
{
struct perf_session *session;
struct evsel *pos;
struct perf_data data = {
.path = file_name,
.mode = PERF_DATA_MODE_READ,
.force = details->force,
};
struct perf_tool tool = {
/* only needed for pipe mode */
.attr = perf_event__process_attr,
.feature = process_header_feature,
};
bool has_tracepoint = false;
session = perf_session__new(&data, &tool);
if (IS_ERR(session))
return PTR_ERR(session);
if (data.is_pipe)
perf_session__process_events(session);
evlist__for_each_entry(session->evlist, pos) {
evsel__fprintf(pos, details, stdout);
if (pos->core.attr.type == PERF_TYPE_TRACEPOINT)
has_tracepoint = true;
}
if (has_tracepoint && !details->trace_fields)
printf("# Tip: use 'perf evlist --trace-fields' to show fields for tracepoint events\n");
perf_session__delete(session);
return 0;
}
int cmd_evlist(int argc, const char **argv)
{
struct perf_attr_details details = { .verbose = false, };
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file", "Input file name"),
OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"),
OPT_BOOLEAN('v', "verbose", &details.verbose,
"Show all event attr details"),
OPT_BOOLEAN('g', "group", &details.event_group,
"Show event group information"),
OPT_BOOLEAN('f', "force", &details.force, "don't complain, do it"),
OPT_BOOLEAN(0, "trace-fields", &details.trace_fields, "Show tracepoint fields"),
OPT_END()
};
const char * const evlist_usage[] = {
"perf evlist [<options>]",
NULL
};
argc = parse_options(argc, argv, options, evlist_usage, 0);
if (argc)
usage_with_options(evlist_usage, options);
if (details.event_group && (details.verbose || details.freq)) {
usage_with_options_msg(evlist_usage, options,
"--group option is not compatible with other options\n");
}
return __cmd_evlist(input_name, &details);
}
| linux-master | tools/perf/builtin-evlist.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-buildid-list.c
*
* Builtin buildid-list command: list buildids in perf.data, in the running
* kernel and in ELF files.
*
* Copyright (C) 2009, Red Hat Inc.
* Copyright (C) 2009, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "builtin.h"
#include "util/build-id.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/map.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/session.h"
#include "util/symbol.h"
#include "util/data.h"
#include "util/util.h"
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
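/*
 * Callback for machine__for_each_kernel_map(): prints the build id
 * (blank when unknown), the start/end addresses and the name of the
 * DSO backing each kernel map.
 */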
static int buildid__map_cb(struct map *map, void *arg __maybe_unused)
{
const struct dso *dso = map__dso(map);
char bid_buf[SBUILD_ID_SIZE];
memset(bid_buf, 0, sizeof(bid_buf));
if (dso->has_build_id)
build_id__sprintf(&dso->bid, bid_buf);
printf("%s %16" PRIx64 " %16" PRIx64, bid_buf, map__start(map), map__end(map));
if (dso->long_name != NULL) {
printf(" %s", dso->long_name);
} else if (dso->short_name != NULL) {
printf(" %s", dso->short_name);
}
printf("\n");
return 0;
}
static void buildid__show_kernel_maps(void)
{
struct machine *machine;
machine = machine__new_host();
if (machine == NULL) {
pr_err("Couldn't create a machine for the host\n");
return;
}
machine__for_each_kernel_map(machine, buildid__map_cb, NULL);
machine__delete(machine);
}
static int sysfs__fprintf_build_id(FILE *fp)
{
char sbuild_id[SBUILD_ID_SIZE];
int ret;
ret = sysfs__sprintf_build_id("/", sbuild_id);
if (ret != sizeof(sbuild_id))
return ret < 0 ? ret : -EINVAL;
return fprintf(fp, "%s\n", sbuild_id);
}
static int filename__fprintf_build_id(const char *name, FILE *fp)
{
char sbuild_id[SBUILD_ID_SIZE];
int ret;
ret = filename__sprintf_build_id(name, sbuild_id);
if (ret != sizeof(sbuild_id))
return ret < 0 ? ret : -EINVAL;
return fprintf(fp, "%s\n", sbuild_id);
}
static bool dso__skip_buildid(struct dso *dso, int with_hits)
{
return with_hits && !dso->hit;
}
static int perf_session__list_build_ids(bool force, bool with_hits)
{
struct perf_session *session;
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = force,
};
symbol__elf_init();
/*
* See if this is an ELF file first:
*/
if (filename__fprintf_build_id(input_name, stdout) > 0)
goto out;
session = perf_session__new(&data, &build_id__mark_dso_hit_ops);
if (IS_ERR(session))
return PTR_ERR(session);
/*
* We take all buildids when the file contains AUX area tracing data
* because we do not decode the trace, as that would take too long.
*/
if (!perf_data__is_pipe(&data) &&
perf_header__has_feat(&session->header, HEADER_AUXTRACE))
with_hits = false;
if (!perf_header__has_feat(&session->header, HEADER_BUILD_ID))
with_hits = true;
if (zstd_init(&(session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
/*
* In pipe mode, the only way to get the buildids is to parse
* the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
*/
if (with_hits || perf_data__is_pipe(&data))
perf_session__process_events(session);
perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits);
perf_session__delete(session);
out:
return 0;
}
int cmd_buildid_list(int argc, const char **argv)
{
bool show_kernel = false;
bool show_kernel_maps = false;
bool with_hits = false;
bool force = false;
const struct option options[] = {
OPT_BOOLEAN('H', "with-hits", &with_hits, "Show only DSOs with hits"),
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_BOOLEAN('k', "kernel", &show_kernel, "Show current kernel build id"),
OPT_BOOLEAN('m', "kernel-maps", &show_kernel_maps,
"Show build id of current kernel + modules"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_END()
};
const char * const buildid_list_usage[] = {
"perf buildid-list [<options>]",
NULL
};
argc = parse_options(argc, argv, options, buildid_list_usage, 0);
setup_pager();
if (show_kernel) {
return !(sysfs__fprintf_build_id(stdout) > 0);
} else if (show_kernel_maps) {
buildid__show_kernel_maps();
return 0;
}
return perf_session__list_build_ids(force, with_hits);
}
| linux-master | tools/perf/builtin-buildid-list.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-help.c
*
* Builtin help command
*/
#include "util/cache.h"
#include "util/config.h"
#include "util/strbuf.h"
#include "builtin.h"
#include <subcmd/exec-cmd.h>
#include "common-cmds.h"
#include <subcmd/parse-options.h>
#include <subcmd/run-command.h>
#include <subcmd/help.h>
#include "util/debug.h"
#include "util/util.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
static struct man_viewer_list {
struct man_viewer_list *next;
char name[];
} *man_viewer_list;
static struct man_viewer_info_list {
struct man_viewer_info_list *next;
const char *info;
char name[];
} *man_viewer_info_list;
enum help_format {
HELP_FORMAT_NONE,
HELP_FORMAT_MAN,
HELP_FORMAT_INFO,
HELP_FORMAT_WEB,
};
static enum help_format parse_help_format(const char *format)
{
if (!strcmp(format, "man"))
return HELP_FORMAT_MAN;
if (!strcmp(format, "info"))
return HELP_FORMAT_INFO;
if (!strcmp(format, "web") || !strcmp(format, "html"))
return HELP_FORMAT_WEB;
pr_err("unrecognized help format '%s'", format);
return HELP_FORMAT_NONE;
}
static const char *get_man_viewer_info(const char *name)
{
struct man_viewer_info_list *viewer;
for (viewer = man_viewer_info_list; viewer; viewer = viewer->next) {
if (!strcasecmp(name, viewer->name))
return viewer->info;
}
return NULL;
}
static int check_emacsclient_version(void)
{
struct strbuf buffer = STRBUF_INIT;
struct child_process ec_process;
const char *argv_ec[] = { "emacsclient", "--version", NULL };
int version;
int ret = -1;
/* emacsclient prints its version number on stderr */
memset(&ec_process, 0, sizeof(ec_process));
ec_process.argv = argv_ec;
ec_process.err = -1;
ec_process.stdout_to_stderr = 1;
if (start_command(&ec_process)) {
fprintf(stderr, "Failed to start emacsclient.\n");
return -1;
}
if (strbuf_read(&buffer, ec_process.err, 20) < 0) {
fprintf(stderr, "Failed to read emacsclient version\n");
goto out;
}
close(ec_process.err);
/*
* Don't bother checking return value, because "emacsclient --version"
* seems to always exits with code 1.
*/
finish_command(&ec_process);
if (!strstarts(buffer.buf, "emacsclient")) {
fprintf(stderr, "Failed to parse emacsclient version.\n");
goto out;
}
version = atoi(buffer.buf + strlen("emacsclient"));
if (version < 22) {
fprintf(stderr,
"emacsclient version '%d' too old (< 22).\n",
version);
} else
ret = 0;
out:
strbuf_release(&buffer);
return ret;
}
static void exec_failed(const char *cmd)
{
char sbuf[STRERR_BUFSIZE];
pr_warning("failed to exec '%s': %s", cmd, str_error_r(errno, sbuf, sizeof(sbuf)));
}
static void exec_woman_emacs(const char *path, const char *page)
{
if (!check_emacsclient_version()) {
/* This works only with emacsclient version >= 22. */
char *man_page;
if (!path)
path = "emacsclient";
if (asprintf(&man_page, "(woman \"%s\")", page) > 0) {
execlp(path, "emacsclient", "-e", man_page, NULL);
free(man_page);
}
exec_failed(path);
}
}
static void exec_man_konqueror(const char *path, const char *page)
{
const char *display = getenv("DISPLAY");
if (display && *display) {
char *man_page;
const char *filename = "kfmclient";
/* It's simpler to launch konqueror using kfmclient. */
if (path) {
const char *file = strrchr(path, '/');
if (file && !strcmp(file + 1, "konqueror")) {
char *new = strdup(path);
char *dest = strrchr(new, '/');
/* strlen("konqueror") == strlen("kfmclient") */
strcpy(dest + 1, "kfmclient");
path = new;
}
if (file)
filename = file;
} else
path = "kfmclient";
if (asprintf(&man_page, "man:%s(1)", page) > 0) {
execlp(path, filename, "newTab", man_page, NULL);
free(man_page);
}
exec_failed(path);
}
}
static void exec_man_man(const char *path, const char *page)
{
if (!path)
path = "man";
execlp(path, "man", page, NULL);
exec_failed(path);
}
static void exec_man_cmd(const char *cmd, const char *page)
{
char *shell_cmd;
if (asprintf(&shell_cmd, "%s %s", cmd, page) > 0) {
execl("/bin/sh", "sh", "-c", shell_cmd, NULL);
free(shell_cmd);
}
exec_failed(cmd);
}
static void add_man_viewer(const char *name)
{
struct man_viewer_list **p = &man_viewer_list;
size_t len = strlen(name);
while (*p)
p = &((*p)->next);
*p = zalloc(sizeof(**p) + len + 1);
if (*p == NULL)
return;
strcpy((*p)->name, name);
}
static int supported_man_viewer(const char *name, size_t len)
{
return (!strncasecmp("man", name, len) ||
!strncasecmp("woman", name, len) ||
!strncasecmp("konqueror", name, len));
}
static void do_add_man_viewer_info(const char *name,
size_t len,
const char *value)
{
struct man_viewer_info_list *new = zalloc(sizeof(*new) + len + 1);
if (new == NULL)
return;
strncpy(new->name, name, len);
new->info = strdup(value);
new->next = man_viewer_info_list;
man_viewer_info_list = new;
}
static void unsupported_man_viewer(const char *name, const char *var)
{
pr_warning("'%s': path for unsupported man viewer.\n"
"Please consider using 'man.<tool>.%s' instead.", name, var);
}
static int add_man_viewer_path(const char *name,
size_t len,
const char *value)
{
if (supported_man_viewer(name, len))
do_add_man_viewer_info(name, len, value);
else
unsupported_man_viewer(name, "cmd");
return 0;
}
static int add_man_viewer_cmd(const char *name,
size_t len,
const char *value)
{
if (supported_man_viewer(name, len))
unsupported_man_viewer(name, "path");
else
do_add_man_viewer_info(name, len, value);
return 0;
}
static int add_man_viewer_info(const char *var, const char *value)
{
const char *name = var + 4;
const char *subkey = strrchr(name, '.');
if (!subkey) {
pr_err("Config with no key for man viewer: %s", name);
return -1;
}
if (!strcmp(subkey, ".path")) {
if (!value)
return config_error_nonbool(var);
return add_man_viewer_path(name, subkey - name, value);
}
if (!strcmp(subkey, ".cmd")) {
if (!value)
return config_error_nonbool(var);
return add_man_viewer_cmd(name, subkey - name, value);
}
pr_warning("'%s': unsupported man viewer sub key.", subkey);
return 0;
}
static int perf_help_config(const char *var, const char *value, void *cb)
{
enum help_format *help_formatp = cb;
if (!strcmp(var, "help.format")) {
if (!value)
return config_error_nonbool(var);
*help_formatp = parse_help_format(value);
if (*help_formatp == HELP_FORMAT_NONE)
return -1;
return 0;
}
if (!strcmp(var, "man.viewer")) {
if (!value)
return config_error_nonbool(var);
add_man_viewer(value);
return 0;
}
if (strstarts(var, "man."))
return add_man_viewer_info(var, value);
return 0;
}
static struct cmdnames main_cmds, other_cmds;
void list_common_cmds_help(void)
{
unsigned int i, longest = 0;
for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
if (longest < strlen(common_cmds[i].name))
longest = strlen(common_cmds[i].name);
}
puts(" The most commonly used perf commands are:");
for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
printf(" %-*s ", longest, common_cmds[i].name);
puts(common_cmds[i].help);
}
}
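/*
 * Map a perf subcommand to its man page name: "report" becomes
 * "perf-report", NULL means the top-level "perf" page, and anything
 * already starting with "perf" is used as-is.
 */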
static const char *cmd_to_page(const char *perf_cmd)
{
char *s;
if (!perf_cmd)
return "perf";
else if (strstarts(perf_cmd, "perf"))
return perf_cmd;
return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s;
}
static void setup_man_path(void)
{
char *new_path;
const char *old_path = getenv("MANPATH");
/* We should always put ':' after our path. If there is no
* old_path, the trailing ':' lets 'man' try the system-wide
* paths after ours to find the manual page. If there is an
* old_path, we need the ':' as a delimiter. */
if (asprintf(&new_path, "%s:%s", system_path(PERF_MAN_PATH), old_path ?: "") > 0) {
setenv("MANPATH", new_path, 1);
free(new_path);
} else {
pr_err("Unable to setup man path");
}
}
static void exec_viewer(const char *name, const char *page)
{
const char *info = get_man_viewer_info(name);
if (!strcasecmp(name, "man"))
exec_man_man(info, page);
else if (!strcasecmp(name, "woman"))
exec_woman_emacs(info, page);
else if (!strcasecmp(name, "konqueror"))
exec_man_konqueror(info, page);
else if (info)
exec_man_cmd(info, page);
else
pr_warning("'%s': unknown man viewer.", name);
}
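/*
 * Try the configured man viewers in order, then the PERF_MAN_VIEWER
 * environment variable, and finally plain "man". Each exec_viewer()
 * call only returns if the viewer could not be executed.
 */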
static int show_man_page(const char *perf_cmd)
{
struct man_viewer_list *viewer;
const char *page = cmd_to_page(perf_cmd);
const char *fallback = getenv("PERF_MAN_VIEWER");
setup_man_path();
for (viewer = man_viewer_list; viewer; viewer = viewer->next)
exec_viewer(viewer->name, page); /* will return when unable */
if (fallback)
exec_viewer(fallback, page);
exec_viewer("man", page);
pr_err("no man viewer handled the request");
return -1;
}
static int show_info_page(const char *perf_cmd)
{
const char *page = cmd_to_page(perf_cmd);
setenv("INFOPATH", system_path(PERF_INFO_PATH), 1);
execlp("info", "info", "perfman", page, NULL);
return -1;
}
static int get_html_page_path(char **page_path, const char *page)
{
struct stat st;
const char *html_path = system_path(PERF_HTML_PATH);
char path[PATH_MAX];
/* Check that we have a perf documentation directory. */
if (stat(mkpath(path, sizeof(path), "%s/perf.html", html_path), &st)
|| !S_ISREG(st.st_mode)) {
pr_err("'%s': not a documentation directory.", html_path);
return -1;
}
return asprintf(page_path, "%s/%s.html", html_path, page);
}
/*
* If open_html is not defined in a platform-specific way (see for
* example compat/mingw.h), we use the script web--browse to display
* HTML.
*/
#ifndef open_html
static void open_html(const char *path)
{
execl_cmd("web--browse", "-c", "help.browser", path, NULL);
}
#endif
static int show_html_page(const char *perf_cmd)
{
const char *page = cmd_to_page(perf_cmd);
char *page_path; /* it leaks but we exec below */
if (get_html_page_path(&page_path, page) < 0)
return -1;
open_html(page_path);
return 0;
}
int cmd_help(int argc, const char **argv)
{
bool show_all = false;
enum help_format help_format = HELP_FORMAT_MAN;
struct option builtin_help_options[] = {
OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN),
OPT_SET_UINT('w', "web", &help_format, "show manual in web browser",
HELP_FORMAT_WEB),
OPT_SET_UINT('i', "info", &help_format, "show info page",
HELP_FORMAT_INFO),
OPT_END(),
};
const char * const builtin_help_subcommands[] = {
"buildid-cache", "buildid-list", "diff", "evlist", "help", "list",
"record", "report", "bench", "stat", "timechart", "top", "annotate",
"script", "sched", "kallsyms", "kmem", "lock", "kvm", "test", "inject", "mem", "data",
#ifdef HAVE_LIBELF_SUPPORT
"probe",
#endif
#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
"trace",
#endif
NULL };
const char *builtin_help_usage[] = {
"perf help [--all] [--man|--web|--info] [command]",
NULL
};
int rc;
load_command_list("perf-", &main_cmds, &other_cmds);
rc = perf_config(perf_help_config, &help_format);
if (rc)
return rc;
argc = parse_options_subcommand(argc, argv, builtin_help_options,
builtin_help_subcommands, builtin_help_usage, 0);
if (show_all) {
printf("\n Usage: %s\n\n", perf_usage_string);
list_commands("perf commands", &main_cmds, &other_cmds);
printf(" %s\n\n", perf_more_info_string);
return 0;
}
if (!argv[0]) {
printf("\n usage: %s\n\n", perf_usage_string);
list_common_cmds_help();
printf("\n %s\n\n", perf_more_info_string);
return 0;
}
switch (help_format) {
case HELP_FORMAT_MAN:
rc = show_man_page(argv[0]);
break;
case HELP_FORMAT_INFO:
rc = show_info_page(argv[0]);
break;
case HELP_FORMAT_WEB:
rc = show_html_page(argv[0]);
break;
case HELP_FORMAT_NONE:
/* fall-through */
default:
rc = -1;
break;
}
return rc;
}
| linux-master | tools/perf/builtin-help.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "builtin.h"
#include <subcmd/parse-options.h>
#include "util/auxtrace.h"
#include "util/trace-event.h"
#include "util/tool.h"
#include "util/session.h"
#include "util/data.h"
#include "util/map_symbol.h"
#include "util/mem-events.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/pmus.h"
#include "util/sample.h"
#include "util/string2.h"
#include "util/util.h"
#include <linux/err.h>
#define MEM_OPERATION_LOAD 0x1
#define MEM_OPERATION_STORE 0x2
struct perf_mem {
struct perf_tool tool;
char const *input_name;
bool hide_unresolved;
bool dump_raw;
bool force;
bool phys_addr;
bool data_page_size;
int operation;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
static int parse_record_events(const struct option *opt,
const char *str, int unset __maybe_unused)
{
struct perf_mem *mem = *(struct perf_mem **)opt->value;
if (!strcmp(str, "list")) {
perf_mem_events__list();
exit(0);
}
if (perf_mem_events__parse(str))
exit(-1);
mem->operation = 0;
return 0;
}
static const char * const __usage[] = {
"perf mem record [<options>] [<command>]",
"perf mem record [<options>] -- <command> [<options>]",
NULL
};
static const char * const *record_mem_usage = __usage;
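/*
 * Build an argv for 'perf record' with the memory events and options
 * implied by 'perf mem record', then hand it over to cmd_record().
 */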
static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
{
int rec_argc, i = 0, j, tmp_nr = 0;
int start, end;
const char **rec_argv;
char **rec_tmp;
int ret;
bool all_user = false, all_kernel = false;
struct perf_mem_event *e;
struct option options[] = {
OPT_CALLBACK('e', "event", &mem, "event",
"event selector. use 'perf mem record -e list' to list available events",
parse_record_events),
OPT_UINTEGER(0, "ldlat", &perf_mem_events__loads_ldlat, "mem-loads latency"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('U', "all-user", &all_user, "collect only user level data"),
OPT_BOOLEAN('K', "all-kernel", &all_kernel, "collect only kernel level data"),
OPT_END()
};
if (perf_mem_events__init()) {
pr_err("failed: memory events not supported\n");
return -1;
}
argc = parse_options(argc, argv, options, record_mem_usage,
PARSE_OPT_KEEP_UNKNOWN);
/* Max number of arguments multiplied by number of PMUs that can support them. */
rec_argc = argc + 9 * perf_pmus__num_mem_pmus();
if (mem->cpu_list)
rec_argc += 2;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
/*
* Save the allocated event name strings.
*/
rec_tmp = calloc(rec_argc + 1, sizeof(char *));
if (!rec_tmp) {
free(rec_argv);
return -1;
}
rec_argv[i++] = "record";
e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
/*
* The load and store operations are required, use the event
* PERF_MEM_EVENTS__LOAD_STORE if it is supported.
*/
if (e->tag &&
(mem->operation & MEM_OPERATION_LOAD) &&
(mem->operation & MEM_OPERATION_STORE)) {
e->record = true;
rec_argv[i++] = "-W";
} else {
if (mem->operation & MEM_OPERATION_LOAD) {
e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
e->record = true;
}
if (mem->operation & MEM_OPERATION_STORE) {
e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
e->record = true;
}
}
e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
if (e->record)
rec_argv[i++] = "-W";
rec_argv[i++] = "-d";
if (mem->phys_addr)
rec_argv[i++] = "--phys-data";
if (mem->data_page_size)
rec_argv[i++] = "--data-page-size";
start = i;
ret = perf_mem_events__record_args(rec_argv, &i, rec_tmp, &tmp_nr);
if (ret)
goto out;
end = i;
if (all_user)
rec_argv[i++] = "--all-user";
if (all_kernel)
rec_argv[i++] = "--all-kernel";
if (mem->cpu_list) {
rec_argv[i++] = "-C";
rec_argv[i++] = mem->cpu_list;
}
for (j = 0; j < argc; j++, i++)
rec_argv[i] = argv[j];
if (verbose > 0) {
pr_debug("calling: record ");
for (j = start; j < end; j++)
pr_debug("%s ", rec_argv[j]);
pr_debug("\n");
}
ret = cmd_record(i, rec_argv);
out:
for (i = 0; i < tmp_nr; i++)
free(rec_tmp[i]);
free(rec_tmp);
free(rec_argv);
return ret;
}
static int
dump_raw_samples(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_mem *mem = container_of(tool, struct perf_mem, tool);
struct addr_location al;
const char *fmt, *field_sep;
char str[PAGE_SIZE_NAME_LEN];
struct dso *dso = NULL;
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
addr_location__exit(&al);
return -1;
}
if (al.filtered || (mem->hide_unresolved && al.sym == NULL))
goto out_put;
if (al.map != NULL) {
dso = map__dso(al.map);
if (dso)
dso->hit = 1;
}
field_sep = symbol_conf.field_sep;
if (field_sep) {
fmt = "%d%s%d%s0x%"PRIx64"%s0x%"PRIx64"%s";
} else {
fmt = "%5d%s%5d%s0x%016"PRIx64"%s0x016%"PRIx64"%s";
symbol_conf.field_sep = " ";
}
printf(fmt,
sample->pid,
symbol_conf.field_sep,
sample->tid,
symbol_conf.field_sep,
sample->ip,
symbol_conf.field_sep,
sample->addr,
symbol_conf.field_sep);
if (mem->phys_addr) {
printf("0x%016"PRIx64"%s",
sample->phys_addr,
symbol_conf.field_sep);
}
if (mem->data_page_size) {
printf("%s%s",
get_page_size_name(sample->data_page_size, str),
symbol_conf.field_sep);
}
if (field_sep)
fmt = "%"PRIu64"%s0x%"PRIx64"%s%s:%s\n";
else
fmt = "%5"PRIu64"%s0x%06"PRIx64"%s%s:%s\n";
printf(fmt,
sample->weight,
symbol_conf.field_sep,
sample->data_src,
symbol_conf.field_sep,
dso ? dso->long_name : "???",
al.sym ? al.sym->name : "???");
out_put:
addr_location__exit(&al);
return 0;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel __maybe_unused,
struct machine *machine)
{
return dump_raw_samples(tool, event, sample, machine);
}
static int report_raw_events(struct perf_mem *mem)
{
struct itrace_synth_opts itrace_synth_opts = {
.set = true,
.mem = true, /* Only enable memory event */
.default_no_sample = true,
};
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = mem->force,
};
int ret;
struct perf_session *session = perf_session__new(&data, &mem->tool);
if (IS_ERR(session))
return PTR_ERR(session);
session->itrace_synth_opts = &itrace_synth_opts;
if (mem->cpu_list) {
ret = perf_session__cpu_bitmap(session, mem->cpu_list,
mem->cpu_bitmap);
if (ret < 0)
goto out_delete;
}
ret = symbol__init(&session->header.env);
if (ret < 0)
goto out_delete;
printf("# PID, TID, IP, ADDR, ");
if (mem->phys_addr)
printf("PHYS ADDR, ");
if (mem->data_page_size)
printf("DATA PAGE SIZE, ");
printf("LOCAL WEIGHT, DSRC, SYMBOL\n");
ret = perf_session__process_events(session);
out_delete:
perf_session__delete(session);
return ret;
}
static char *get_sort_order(struct perf_mem *mem)
{
bool has_extra_options = mem->phys_addr || mem->data_page_size;
char sort[128];
/*
* there is no weight (cost) associated with stores, so don't print
* the column
*/
if (!(mem->operation & MEM_OPERATION_LOAD)) {
strcpy(sort, "--sort=mem,sym,dso,symbol_daddr,"
"dso_daddr,tlb,locked");
} else if (has_extra_options) {
strcpy(sort, "--sort=local_weight,mem,sym,dso,symbol_daddr,"
"dso_daddr,snoop,tlb,locked,blocked");
} else
return NULL;
if (mem->phys_addr)
strcat(sort, ",phys_daddr");
if (mem->data_page_size)
strcat(sort, ",data_page_size");
return strdup(sort);
}
static int report_events(int argc, const char **argv, struct perf_mem *mem)
{
const char **rep_argv;
int ret, i = 0, j, rep_argc;
char *new_sort_order;
if (mem->dump_raw)
return report_raw_events(mem);
rep_argc = argc + 3;
rep_argv = calloc(rep_argc + 1, sizeof(char *));
if (!rep_argv)
return -1;
rep_argv[i++] = "report";
rep_argv[i++] = "--mem-mode";
rep_argv[i++] = "-n"; /* display number of samples */
new_sort_order = get_sort_order(mem);
if (new_sort_order)
rep_argv[i++] = new_sort_order;
for (j = 1; j < argc; j++, i++)
rep_argv[i] = argv[j];
ret = cmd_report(i, rep_argv);
free(rep_argv);
return ret;
}
struct mem_mode {
const char *name;
int mode;
};
#define MEM_OPT(n, m) \
{ .name = n, .mode = (m) }
#define MEM_END { .name = NULL }
static const struct mem_mode mem_modes[] = {
MEM_OPT("load", MEM_OPERATION_LOAD),
MEM_OPT("store", MEM_OPERATION_STORE),
MEM_END
};
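/*
 * Parse the -t/--type argument: a comma-separated list of "load" and/or
 * "store" memory operations. Unknown operation names are rejected.
 */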
static int
parse_mem_ops(const struct option *opt, const char *str, int unset)
{
int *mode = (int *)opt->value;
const struct mem_mode *m;
char *s, *os = NULL, *p;
int ret = -1;
if (unset)
return 0;
/* str may be NULL in case no arg is passed to -t */
if (str) {
/* because str is read-only */
s = os = strdup(str);
if (!s)
return -1;
/* reset mode */
*mode = 0;
for (;;) {
p = strchr(s, ',');
if (p)
*p = '\0';
for (m = mem_modes; m->name; m++) {
if (!strcasecmp(s, m->name))
break;
}
if (!m->name) {
fprintf(stderr, "unknown sampling op %s,"
" check man page\n", s);
goto error;
}
*mode |= m->mode;
if (!p)
break;
s = p + 1;
}
}
ret = 0;
if (*mode == 0)
*mode = MEM_OPERATION_LOAD;
error:
free(os);
return ret;
}
int cmd_mem(int argc, const char **argv)
{
struct stat st;
struct perf_mem mem = {
.tool = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.comm = perf_event__process_comm,
.lost = perf_event__process_lost,
.fork = perf_event__process_fork,
.attr = perf_event__process_attr,
.build_id = perf_event__process_build_id,
.namespaces = perf_event__process_namespaces,
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.auxtrace_error = perf_event__process_auxtrace_error,
.ordered_events = true,
},
.input_name = "perf.data",
/*
* default to both load and store sampling
*/
.operation = MEM_OPERATION_LOAD | MEM_OPERATION_STORE,
};
const struct option mem_options[] = {
OPT_CALLBACK('t', "type", &mem.operation,
"type", "memory operations(load,store) Default load,store",
parse_mem_ops),
OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw,
"dump raw samples in ASCII"),
OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved,
"Only display entries resolved to a symbol"),
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_STRING('C', "cpu", &mem.cpu_list, "cpu",
"list of cpus to profile"),
OPT_STRING_NOEMPTY('x', "field-separator", &symbol_conf.field_sep,
"separator",
"separator for columns, no spaces will be added"
" between columns '.' is reserved."),
OPT_BOOLEAN('f', "force", &mem.force, "don't complain, do it"),
OPT_BOOLEAN('p', "phys-data", &mem.phys_addr, "Record/Report sample physical addresses"),
OPT_BOOLEAN(0, "data-page-size", &mem.data_page_size, "Record/Report sample data address page size"),
OPT_END()
};
const char *const mem_subcommands[] = { "record", "report", NULL };
const char *mem_usage[] = {
NULL,
NULL
};
argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
mem_usage, PARSE_OPT_KEEP_UNKNOWN);
if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation))
usage_with_options(mem_usage, mem_options);
if (!mem.input_name || !strlen(mem.input_name)) {
if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
mem.input_name = "-";
else
mem.input_name = "perf.data";
}
if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
return __cmd_record(argc, argv, &mem);
else if (strlen(argv[0]) > 2 && strstarts("report", argv[0]))
return report_events(argc, argv, &mem);
else
usage_with_options(mem_usage, mem_options);
return 0;
}
| linux-master | tools/perf/builtin-mem.c |
// SPDX-License-Identifier: GPL-2.0
#include "../util/string2.h"
#include "../util/config.h"
#include "libslang.h"
#include "ui.h"
#include "util.h"
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <stdlib.h>
#include <sys/ttydefaults.h>
#include "browser.h"
#include "helpline.h"
#include "keysyms.h"
#include "../util/color.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
static int ui_browser__percent_color(struct ui_browser *browser,
double percent, bool current)
{
if (current && (!browser->use_navkeypressed || browser->navkeypressed))
return HE_COLORSET_SELECTED;
if (percent >= MIN_RED)
return HE_COLORSET_TOP;
if (percent >= MIN_GREEN)
return HE_COLORSET_MEDIUM;
return HE_COLORSET_NORMAL;
}
int ui_browser__set_color(struct ui_browser *browser, int color)
{
int ret = browser->current_color;
browser->current_color = color;
SLsmg_set_color(color);
return ret;
}
void ui_browser__set_percent_color(struct ui_browser *browser,
double percent, bool current)
{
int color = ui_browser__percent_color(browser, percent, current);
ui_browser__set_color(browser, color);
}
void ui_browser__gotorc_title(struct ui_browser *browser, int y, int x)
{
SLsmg_gotorc(browser->y + y, browser->x + x);
}
void ui_browser__gotorc(struct ui_browser *browser, int y, int x)
{
SLsmg_gotorc(browser->y + y + browser->extra_title_lines, browser->x + x);
}
void ui_browser__write_nstring(struct ui_browser *browser __maybe_unused, const char *msg,
unsigned int width)
{
SLsmg_write_nstring(msg, width);
}
void ui_browser__vprintf(struct ui_browser *browser __maybe_unused, const char *fmt, va_list args)
{
SLsmg_vprintf(fmt, args);
}
void ui_browser__printf(struct ui_browser *browser __maybe_unused, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
ui_browser__vprintf(browser, fmt, args);
va_end(args);
}
static struct list_head *
ui_browser__list_head_filter_entries(struct ui_browser *browser,
struct list_head *pos)
{
do {
if (!browser->filter || !browser->filter(browser, pos))
return pos;
pos = pos->next;
} while (pos != browser->entries);
return NULL;
}
static struct list_head *
ui_browser__list_head_filter_prev_entries(struct ui_browser *browser,
struct list_head *pos)
{
do {
if (!browser->filter || !browser->filter(browser, pos))
return pos;
pos = pos->prev;
} while (pos != browser->entries);
return NULL;
}
void ui_browser__list_head_seek(struct ui_browser *browser, off_t offset, int whence)
{
struct list_head *head = browser->entries;
struct list_head *pos;
if (browser->nr_entries == 0)
return;
switch (whence) {
case SEEK_SET:
pos = ui_browser__list_head_filter_entries(browser, head->next);
break;
case SEEK_CUR:
pos = browser->top;
break;
case SEEK_END:
pos = ui_browser__list_head_filter_prev_entries(browser, head->prev);
break;
default:
return;
}
assert(pos != NULL);
if (offset > 0) {
while (offset-- != 0)
pos = ui_browser__list_head_filter_entries(browser, pos->next);
} else {
while (offset++ != 0)
pos = ui_browser__list_head_filter_prev_entries(browser, pos->prev);
}
browser->top = pos;
}
void ui_browser__rb_tree_seek(struct ui_browser *browser, off_t offset, int whence)
{
struct rb_root *root = browser->entries;
struct rb_node *nd;
switch (whence) {
case SEEK_SET:
nd = rb_first(root);
break;
case SEEK_CUR:
nd = browser->top;
break;
case SEEK_END:
nd = rb_last(root);
break;
default:
return;
}
if (offset > 0) {
while (offset-- != 0)
nd = rb_next(nd);
} else {
while (offset++ != 0)
nd = rb_prev(nd);
}
browser->top = nd;
}
unsigned int ui_browser__rb_tree_refresh(struct ui_browser *browser)
{
struct rb_node *nd;
int row = 0;
if (browser->top == NULL)
browser->top = rb_first(browser->entries);
nd = browser->top;
while (nd != NULL) {
ui_browser__gotorc(browser, row, 0);
browser->write(browser, nd, row);
if (++row == browser->rows)
break;
nd = rb_next(nd);
}
return row;
}
bool ui_browser__is_current_entry(struct ui_browser *browser, unsigned row)
{
return browser->top_idx + row == browser->index;
}
void ui_browser__refresh_dimensions(struct ui_browser *browser)
{
browser->width = SLtt_Screen_Cols - 1;
browser->height = browser->rows = SLtt_Screen_Rows - 2;
browser->rows -= browser->extra_title_lines;
browser->y = 1;
browser->x = 0;
}
void ui_browser__handle_resize(struct ui_browser *browser)
{
ui__refresh_dimensions(false);
ui_browser__show(browser, browser->title, ui_helpline__current);
ui_browser__refresh(browser);
}
int ui_browser__warning(struct ui_browser *browser, int timeout,
const char *format, ...)
{
va_list args;
char *text;
int key = 0, err;
va_start(args, format);
err = vasprintf(&text, format, args);
va_end(args);
if (err < 0) {
va_start(args, format);
ui_helpline__vpush(format, args);
va_end(args);
} else {
while ((key = ui__question_window("Warning!", text,
"Press any key...",
timeout)) == K_RESIZE)
ui_browser__handle_resize(browser);
free(text);
}
return key;
}
int ui_browser__help_window(struct ui_browser *browser, const char *text)
{
int key;
while ((key = ui__help_window(text)) == K_RESIZE)
ui_browser__handle_resize(browser);
return key;
}
bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text)
{
int key;
while ((key = ui__dialog_yesno(text)) == K_RESIZE)
ui_browser__handle_resize(browser);
return key == K_ENTER || toupper(key) == 'Y';
}
void ui_browser__reset_index(struct ui_browser *browser)
{
browser->index = browser->top_idx = 0;
browser->seek(browser, 0, SEEK_SET);
}
void __ui_browser__show_title(struct ui_browser *browser, const char *title)
{
SLsmg_gotorc(0, 0);
ui_browser__set_color(browser, HE_COLORSET_ROOT);
ui_browser__write_nstring(browser, title, browser->width + 1);
}
void ui_browser__show_title(struct ui_browser *browser, const char *title)
{
mutex_lock(&ui__lock);
__ui_browser__show_title(browser, title);
mutex_unlock(&ui__lock);
}
int ui_browser__show(struct ui_browser *browser, const char *title,
const char *helpline, ...)
{
int err;
va_list ap;
if (browser->refresh_dimensions == NULL)
browser->refresh_dimensions = ui_browser__refresh_dimensions;
browser->refresh_dimensions(browser);
mutex_lock(&ui__lock);
__ui_browser__show_title(browser, title);
browser->title = title;
zfree(&browser->helpline);
va_start(ap, helpline);
err = vasprintf(&browser->helpline, helpline, ap);
va_end(ap);
if (err > 0)
ui_helpline__push(browser->helpline);
mutex_unlock(&ui__lock);
return err ? 0 : -1;
}
void ui_browser__hide(struct ui_browser *browser)
{
mutex_lock(&ui__lock);
ui_helpline__pop();
zfree(&browser->helpline);
mutex_unlock(&ui__lock);
}
static void ui_browser__scrollbar_set(struct ui_browser *browser)
{
int height = browser->height, h = 0, pct = 0,
col = browser->width,
row = 0;
if (browser->nr_entries > 1) {
pct = ((browser->index * (browser->height - 1)) /
(browser->nr_entries - 1));
}
SLsmg_set_char_set(1);
while (h < height) {
ui_browser__gotorc(browser, row++, col);
SLsmg_write_char(h == pct ? SLSMG_DIAMOND_CHAR : SLSMG_CKBRD_CHAR);
++h;
}
SLsmg_set_char_set(0);
}
static int __ui_browser__refresh(struct ui_browser *browser)
{
int row;
int width = browser->width;
row = browser->refresh(browser);
ui_browser__set_color(browser, HE_COLORSET_NORMAL);
if (!browser->use_navkeypressed || browser->navkeypressed)
ui_browser__scrollbar_set(browser);
else
width += 1;
SLsmg_fill_region(browser->y + row + browser->extra_title_lines, browser->x,
browser->rows - row, width, ' ');
if (browser->nr_entries == 0 && browser->no_samples_msg)
__ui__info_window(NULL, browser->no_samples_msg, NULL);
return 0;
}
int ui_browser__refresh(struct ui_browser *browser)
{
mutex_lock(&ui__lock);
__ui_browser__refresh(browser);
mutex_unlock(&ui__lock);
return 0;
}
/*
* Here we're updating nr_entries _after_ we started browsing, i.e. we have to
* forget about any reference to any entry in the underlying data structure,
* that is why we do a SEEK_SET. Think about 'perf top' in the hists browser
* after an output_resort and hist decay.
*/
void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries)
{
off_t offset = nr_entries - browser->nr_entries;
browser->nr_entries = nr_entries;
if (offset < 0) {
if (browser->top_idx < (u64)-offset)
offset = -browser->top_idx;
browser->index += offset;
browser->top_idx += offset;
}
browser->top = NULL;
browser->seek(browser, browser->top_idx, SEEK_SET);
}
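/*
 * Main interactive loop: refresh the screen, wait up to delay_secs for a
 * key, handle resize and navigation keys internally and return any other
 * key to the caller.
 */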
int ui_browser__run(struct ui_browser *browser, int delay_secs)
{
int err, key;
while (1) {
off_t offset;
mutex_lock(&ui__lock);
err = __ui_browser__refresh(browser);
SLsmg_refresh();
mutex_unlock(&ui__lock);
if (err < 0)
break;
key = ui__getch(delay_secs);
if (key == K_RESIZE) {
ui__refresh_dimensions(false);
browser->refresh_dimensions(browser);
__ui_browser__show_title(browser, browser->title);
ui_helpline__puts(browser->helpline);
continue;
}
if (browser->use_navkeypressed && !browser->navkeypressed) {
if (key == K_DOWN || key == K_UP ||
(browser->columns && (key == K_LEFT || key == K_RIGHT)) ||
key == K_PGDN || key == K_PGUP ||
key == K_HOME || key == K_END ||
key == ' ') {
browser->navkeypressed = true;
continue;
} else
return key;
}
switch (key) {
case K_DOWN:
if (browser->index == browser->nr_entries - 1)
break;
++browser->index;
if (browser->index == browser->top_idx + browser->rows) {
++browser->top_idx;
browser->seek(browser, +1, SEEK_CUR);
}
break;
case K_UP:
if (browser->index == 0)
break;
--browser->index;
if (browser->index < browser->top_idx) {
--browser->top_idx;
browser->seek(browser, -1, SEEK_CUR);
}
break;
case K_RIGHT:
if (!browser->columns)
goto out;
if (browser->horiz_scroll < browser->columns - 1)
++browser->horiz_scroll;
break;
case K_LEFT:
if (!browser->columns)
goto out;
if (browser->horiz_scroll != 0)
--browser->horiz_scroll;
break;
case K_PGDN:
case ' ':
if (browser->top_idx + browser->rows > browser->nr_entries - 1)
break;
offset = browser->rows;
if (browser->index + offset > browser->nr_entries - 1)
offset = browser->nr_entries - 1 - browser->index;
browser->index += offset;
browser->top_idx += offset;
browser->seek(browser, +offset, SEEK_CUR);
break;
case K_PGUP:
if (browser->top_idx == 0)
break;
if (browser->top_idx < browser->rows)
offset = browser->top_idx;
else
offset = browser->rows;
browser->index -= offset;
browser->top_idx -= offset;
browser->seek(browser, -offset, SEEK_CUR);
break;
case K_HOME:
ui_browser__reset_index(browser);
break;
case K_END:
offset = browser->rows - 1;
if (offset >= browser->nr_entries)
offset = browser->nr_entries - 1;
browser->index = browser->nr_entries - 1;
browser->top_idx = browser->index - offset;
browser->seek(browser, -offset, SEEK_END);
break;
default:
out:
return key;
}
}
return -1;
}
unsigned int ui_browser__list_head_refresh(struct ui_browser *browser)
{
struct list_head *pos;
struct list_head *head = browser->entries;
int row = 0;
if (browser->top == NULL || browser->top == browser->entries)
browser->top = ui_browser__list_head_filter_entries(browser, head->next);
pos = browser->top;
list_for_each_from(pos, head) {
if (!browser->filter || !browser->filter(browser, pos)) {
ui_browser__gotorc(browser, row, 0);
browser->write(browser, pos, row);
if (++row == browser->rows)
break;
}
}
return row;
}
static struct ui_browser_colorset {
const char *name, *fg, *bg;
int colorset;
} ui_browser__colorsets[] = {
{
.colorset = HE_COLORSET_TOP,
.name = "top",
.fg = "red",
.bg = "default",
},
{
.colorset = HE_COLORSET_MEDIUM,
.name = "medium",
.fg = "green",
.bg = "default",
},
{
.colorset = HE_COLORSET_NORMAL,
.name = "normal",
.fg = "default",
.bg = "default",
},
{
.colorset = HE_COLORSET_SELECTED,
.name = "selected",
.fg = "black",
.bg = "yellow",
},
{
.colorset = HE_COLORSET_JUMP_ARROWS,
.name = "jump_arrows",
.fg = "blue",
.bg = "default",
},
{
.colorset = HE_COLORSET_ADDR,
.name = "addr",
.fg = "magenta",
.bg = "default",
},
{
.colorset = HE_COLORSET_ROOT,
.name = "root",
.fg = "white",
.bg = "blue",
},
{
.name = NULL,
}
};
static int ui_browser__color_config(const char *var, const char *value,
void *data __maybe_unused)
{
char *fg = NULL, *bg;
int i;
/* only the 'colors.*' config entries are handled here */
if (!strstarts(var, "colors."))
return 0;
for (i = 0; ui_browser__colorsets[i].name != NULL; ++i) {
const char *name = var + 7;
if (strcmp(ui_browser__colorsets[i].name, name) != 0)
continue;
fg = strdup(value);
if (fg == NULL)
break;
bg = strchr(fg, ',');
if (bg == NULL)
break;
*bg = '\0';
bg = skip_spaces(bg + 1);
ui_browser__colorsets[i].bg = bg;
ui_browser__colorsets[i].fg = fg;
return 0;
}
free(fg);
return -1;
}
void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
{
switch (whence) {
case SEEK_SET:
browser->top = browser->entries;
break;
case SEEK_CUR:
browser->top = (char **)browser->top + offset;
break;
case SEEK_END:
browser->top = (char **)browser->entries + browser->nr_entries - 1 + offset;
break;
default:
return;
}
assert((char **)browser->top < (char **)browser->entries + browser->nr_entries);
assert((char **)browser->top >= (char **)browser->entries);
}
unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
{
unsigned int row = 0, idx = browser->top_idx;
char **pos;
if (browser->top == NULL)
browser->top = browser->entries;
pos = (char **)browser->top;
while (idx < browser->nr_entries &&
row < (unsigned)SLtt_Screen_Rows - 1) {
assert(pos < (char **)browser->entries + browser->nr_entries);
if (!browser->filter || !browser->filter(browser, *pos)) {
ui_browser__gotorc(browser, row, 0);
browser->write(browser, pos, row);
if (++row == browser->rows)
break;
}
++idx;
++pos;
}
return row;
}
void __ui_browser__vline(struct ui_browser *browser, unsigned int column,
u16 start, u16 end)
{
SLsmg_set_char_set(1);
ui_browser__gotorc(browser, start, column);
SLsmg_draw_vline(end - start + 1);
SLsmg_set_char_set(0);
}
void ui_browser__write_graph(struct ui_browser *browser __maybe_unused,
int graph)
{
SLsmg_set_char_set(1);
SLsmg_write_char(graph);
SLsmg_set_char_set(0);
}
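/*
 * Draw the arrow connecting a jump instruction to a target that sits
 * above it on the screen, clipping both ends to the currently visible
 * rows.
 */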
static void __ui_browser__line_arrow_up(struct ui_browser *browser,
unsigned int column,
u64 start, u64 end)
{
unsigned int row, end_row;
SLsmg_set_char_set(1);
if (start < browser->top_idx + browser->rows) {
row = start - browser->top_idx;
ui_browser__gotorc(browser, row, column);
SLsmg_write_char(SLSMG_LLCORN_CHAR);
ui_browser__gotorc(browser, row, column + 1);
SLsmg_draw_hline(2);
if (row-- == 0)
goto out;
} else
row = browser->rows - 1;
if (end > browser->top_idx)
end_row = end - browser->top_idx;
else
end_row = 0;
ui_browser__gotorc(browser, end_row, column);
SLsmg_draw_vline(row - end_row + 1);
ui_browser__gotorc(browser, end_row, column);
if (end >= browser->top_idx) {
SLsmg_write_char(SLSMG_ULCORN_CHAR);
ui_browser__gotorc(browser, end_row, column + 1);
SLsmg_write_char(SLSMG_HLINE_CHAR);
ui_browser__gotorc(browser, end_row, column + 2);
SLsmg_write_char(SLSMG_RARROW_CHAR);
}
out:
SLsmg_set_char_set(0);
}
static void __ui_browser__line_arrow_down(struct ui_browser *browser,
unsigned int column,
u64 start, u64 end)
{
unsigned int row, end_row;
SLsmg_set_char_set(1);
if (start >= browser->top_idx) {
row = start - browser->top_idx;
ui_browser__gotorc(browser, row, column);
SLsmg_write_char(SLSMG_ULCORN_CHAR);
ui_browser__gotorc(browser, row, column + 1);
SLsmg_draw_hline(2);
if (++row == 0)
goto out;
} else
row = 0;
if (end >= browser->top_idx + browser->rows)
end_row = browser->rows - 1;
else
end_row = end - browser->top_idx;
ui_browser__gotorc(browser, row, column);
SLsmg_draw_vline(end_row - row + 1);
ui_browser__gotorc(browser, end_row, column);
if (end < browser->top_idx + browser->rows) {
SLsmg_write_char(SLSMG_LLCORN_CHAR);
ui_browser__gotorc(browser, end_row, column + 1);
SLsmg_write_char(SLSMG_HLINE_CHAR);
ui_browser__gotorc(browser, end_row, column + 2);
SLsmg_write_char(SLSMG_RARROW_CHAR);
}
out:
SLsmg_set_char_set(0);
}
void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
u64 start, u64 end)
{
if (start > end)
__ui_browser__line_arrow_up(browser, column, start, end);
else
__ui_browser__line_arrow_down(browser, column, start, end);
}
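/*
* Sketch of the drawing (illustrative): with top_idx == 0, calling
* __ui_browser__line_arrow(browser, 10, 8, 3) takes the "up" branch
* since start > end, producing in column 10 a lower-left corner at
* row 8, a vertical line up to row 3, and an upper-left corner plus
* an arrow head pointing at the jump target.
*/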
void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
unsigned int row, int diff, bool arrow_down)
{
int end_row;
if (diff <= 0)
return;
SLsmg_set_char_set(1);
if (arrow_down) {
if (row + diff <= browser->top_idx)
return;
end_row = row + diff - browser->top_idx;
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_write_char(SLSMG_LTEE_CHAR);
while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) {
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_draw_vline(1);
}
end_row = (int)(row - browser->top_idx);
if (end_row >= 0) {
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_write_char(SLSMG_ULCORN_CHAR);
ui_browser__gotorc(browser, end_row, column);
SLsmg_draw_hline(2);
}
} else {
if (row < browser->top_idx)
return;
end_row = row - browser->top_idx;
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_write_char(SLSMG_LTEE_CHAR);
ui_browser__gotorc(browser, end_row, column);
SLsmg_draw_hline(2);
}
SLsmg_set_char_set(0);
}
void ui_browser__init(void)
{
int i = 0;
perf_config(ui_browser__color_config, NULL);
while (ui_browser__colorsets[i].name) {
struct ui_browser_colorset *c = &ui_browser__colorsets[i++];
SLtt_set_color(c->colorset, c->name, c->fg, c->bg);
}
}
| linux-master | tools/perf/ui/browser.c |
// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include "../util/debug.h"
#include <stdio.h>
/*
* Default error logging functions
*/
static int perf_stdio__error(const char *format, va_list args)
{
fprintf(stderr, "Error:\n");
vfprintf(stderr, format, args);
return 0;
}
static int perf_stdio__warning(const char *format, va_list args)
{
if (quiet)
return 0;
fprintf(stderr, "Warning:\n");
vfprintf(stderr, format, args);
return 0;
}
static struct perf_error_ops default_eops = {
.error = perf_stdio__error,
.warning = perf_stdio__warning,
};
static struct perf_error_ops *perf_eops = &default_eops;
int ui__error(const char *format, ...)
{
int ret;
va_list args;
va_start(args, format);
ret = perf_eops->error(format, args);
va_end(args);
return ret;
}
int ui__warning(const char *format, ...)
{
int ret;
va_list args;
if (quiet)
return 0;
va_start(args, format);
ret = perf_eops->warning(format, args);
va_end(args);
return ret;
}
/**
* perf_error__register - Register error logging functions
* @eops: The pointer to error logging function struct
*
* Register UI-specific error logging functions. Before calling this,
* other logging functions should be unregistered, if any.
*/
int perf_error__register(struct perf_error_ops *eops)
{
if (perf_eops != &default_eops)
return -1;
perf_eops = eops;
return 0;
}
/**
* perf_error__unregister - Unregister error logging functions
* @eops: The pointer to error logging function struct
*
* Unregister already registered error logging functions.
*/
int perf_error__unregister(struct perf_error_ops *eops)
{
if (perf_eops != eops)
return -1;
perf_eops = &default_eops;
return 0;
}
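/*
* Usage sketch: a UI backend installs its handlers at init time and
* restores the stdio defaults on teardown, which is exactly what the
* TUI code does with perf_tui_eops:
*
*	perf_error__register(&perf_tui_eops);
*	...
*	perf_error__unregister(&perf_tui_eops);
*
* Registration fails with -1 if another backend is already installed.
*/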
| linux-master | tools/perf/ui/util.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "helpline.h"
#include "ui.h"
char ui_helpline__current[512];
static void nop_helpline__pop(void)
{
}
static void nop_helpline__push(const char *msg __maybe_unused)
{
}
static int nop_helpline__show(const char *fmt __maybe_unused,
va_list ap __maybe_unused)
{
return 0;
}
static struct ui_helpline default_helpline_fns = {
.pop = nop_helpline__pop,
.push = nop_helpline__push,
.show = nop_helpline__show,
};
struct ui_helpline *helpline_fns = &default_helpline_fns;
void ui_helpline__pop(void)
{
helpline_fns->pop();
}
void ui_helpline__push(const char *msg)
{
helpline_fns->push(msg);
}
void ui_helpline__vpush(const char *fmt, va_list ap)
{
char *s;
if (vasprintf(&s, fmt, ap) < 0)
vfprintf(stderr, fmt, ap);
else {
ui_helpline__push(s);
free(s);
}
}
void ui_helpline__fpush(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
ui_helpline__vpush(fmt, ap);
va_end(ap);
}
void ui_helpline__puts(const char *msg)
{
ui_helpline__pop();
ui_helpline__push(msg);
}
int ui_helpline__vshow(const char *fmt, va_list ap)
{
return helpline_fns->show(fmt, ap);
}
void ui_helpline__printf(const char *fmt, ...)
{
va_list ap;
ui_helpline__pop();
va_start(ap, fmt);
ui_helpline__vpush(fmt, ap);
va_end(ap);
}
| linux-master | tools/perf/ui/helpline.c |
// SPDX-License-Identifier: GPL-2.0
#include <dlfcn.h>
#include <signal.h>
#include <unistd.h>
#include <subcmd/pager.h>
#include "../util/debug.h"
#include "../util/hist.h"
#include "ui.h"
struct mutex ui__lock;
void *perf_gtk_handle;
int use_browser = -1;
#define PERF_GTK_DSO "libperf-gtk.so"
#ifdef HAVE_GTK2_SUPPORT
static int setup_gtk_browser(void)
{
int (*perf_ui_init)(void);
if (perf_gtk_handle)
return 0;
perf_gtk_handle = dlopen(PERF_GTK_DSO, RTLD_LAZY);
if (perf_gtk_handle == NULL) {
char buf[PATH_MAX];
scnprintf(buf, sizeof(buf), "%s/%s", LIBDIR, PERF_GTK_DSO);
perf_gtk_handle = dlopen(buf, RTLD_LAZY);
}
if (perf_gtk_handle == NULL)
return -1;
perf_ui_init = dlsym(perf_gtk_handle, "perf_gtk__init");
if (perf_ui_init == NULL)
goto out_close;
if (perf_ui_init() == 0)
return 0;
out_close:
dlclose(perf_gtk_handle);
return -1;
}
static void exit_gtk_browser(bool wait_for_ok)
{
void (*perf_ui_exit)(bool);
if (perf_gtk_handle == NULL)
return;
perf_ui_exit = dlsym(perf_gtk_handle, "perf_gtk__exit");
if (perf_ui_exit == NULL)
goto out_close;
perf_ui_exit(wait_for_ok);
out_close:
dlclose(perf_gtk_handle);
perf_gtk_handle = NULL;
}
#else
static inline int setup_gtk_browser(void) { return -1; }
static inline void exit_gtk_browser(bool wait_for_ok __maybe_unused) {}
#endif
int stdio__config_color(const struct option *opt __maybe_unused,
const char *mode, int unset __maybe_unused)
{
perf_use_color_default = perf_config_colorbool("color.ui", mode, -1);
return 0;
}
void setup_browser(bool fallback_to_pager)
{
mutex_init(&ui__lock);
if (use_browser < 2 && (!isatty(1) || dump_trace))
use_browser = 0;
/* default to TUI */
if (use_browser < 0)
use_browser = 1;
switch (use_browser) {
case 2:
if (setup_gtk_browser() == 0)
break;
printf("GTK browser requested but could not find %s\n",
PERF_GTK_DSO);
sleep(1);
use_browser = 1;
/* fall through */
case 1:
if (ui__init() == 0)
break;
/* fall through */
default:
use_browser = 0;
if (fallback_to_pager)
setup_pager();
break;
}
}
void exit_browser(bool wait_for_ok)
{
switch (use_browser) {
case 2:
exit_gtk_browser(wait_for_ok);
break;
case 1:
ui__exit(wait_for_ok);
break;
default:
break;
}
mutex_destroy(&ui__lock);
}
void pthread__block_sigwinch(void)
{
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGWINCH);
pthread_sigmask(SIG_BLOCK, &set, NULL);
}
void pthread__unblock_sigwinch(void)
{
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGWINCH);
pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
| linux-master | tools/perf/ui/setup.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>
#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../util/thread.h"
#include "../util/util.h"
/* hist period print (hpp) functions */
#define hpp__call_print_fn(hpp, fn, fmt, ...) \
({ \
int __ret = fn(hpp, fmt, ##__VA_ARGS__); \
advance_hpp(hpp, __ret); \
__ret; \
})
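/*
* Note that the statement expression above both calls the print
* callback and advances hpp->buf/hpp->size by the number of characters
* written, so successive hpp__call_print_fn() calls append to the same
* buffer. __hpp__fmt() relies on this and restores the original
* buf/size before returning.
*/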
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
hpp_field_fn get_field, const char *fmt, int len,
hpp_snprint_fn print_fn, bool fmt_percent)
{
int ret;
struct hists *hists = he->hists;
struct evsel *evsel = hists_to_evsel(hists);
char *buf = hpp->buf;
size_t size = hpp->size;
if (fmt_percent) {
double percent = 0.0;
u64 total = hists__total_period(hists);
if (total)
percent = 100.0 * get_field(he) / total;
ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
} else
ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));
if (evsel__is_group_event(evsel)) {
int prev_idx, idx_delta;
struct hist_entry *pair;
int nr_members = evsel->core.nr_members;
prev_idx = evsel__group_idx(evsel);
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
u64 period = get_field(pair);
u64 total = hists__total_period(pair->hists);
if (!total)
continue;
evsel = hists_to_evsel(pair->hists);
idx_delta = evsel__group_idx(evsel) - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members in the middle which
* have no sample
*/
if (fmt_percent) {
ret += hpp__call_print_fn(hpp, print_fn,
fmt, len, 0.0);
} else {
ret += hpp__call_print_fn(hpp, print_fn,
fmt, len, 0ULL);
}
}
if (fmt_percent) {
ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
100.0 * period / total);
} else {
ret += hpp__call_print_fn(hpp, print_fn, fmt,
len, period);
}
prev_idx = evsel__group_idx(evsel);
}
idx_delta = nr_members - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members at last which have no sample
*/
if (fmt_percent) {
ret += hpp__call_print_fn(hpp, print_fn,
fmt, len, 0.0);
} else {
ret += hpp__call_print_fn(hpp, print_fn,
fmt, len, 0ULL);
}
}
}
/*
* Restore the original buf and size, as that is where the caller
* expects the result to be saved.
*/
hpp->buf = buf;
hpp->size = size;
return ret;
}
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he, hpp_field_fn get_field,
const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
int len = fmt->user_len ?: fmt->len;
if (symbol_conf.field_sep) {
return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
print_fn, fmt_percent);
}
if (fmt_percent)
len -= 2; /* 2 for a space and a % sign */
else
len -= 1;
return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he, hpp_field_fn get_field,
const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
if (!symbol_conf.cumulate_callchain) {
int len = fmt->user_len ?: fmt->len;
return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
}
return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}
static int field_cmp(u64 field_a, u64 field_b)
{
if (field_a > field_b)
return 1;
if (field_a < field_b)
return -1;
return 0;
}
static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
hpp_field_fn get_field, int nr_members,
u64 **fields_a, u64 **fields_b)
{
u64 *fa = calloc(nr_members, sizeof(*fa)),
*fb = calloc(nr_members, sizeof(*fb));
struct hist_entry *pair;
if (!fa || !fb)
goto out_free;
list_for_each_entry(pair, &a->pairs.head, pairs.node) {
struct evsel *evsel = hists_to_evsel(pair->hists);
fa[evsel__group_idx(evsel)] = get_field(pair);
}
list_for_each_entry(pair, &b->pairs.head, pairs.node) {
struct evsel *evsel = hists_to_evsel(pair->hists);
fb[evsel__group_idx(evsel)] = get_field(pair);
}
*fields_a = fa;
*fields_b = fb;
return 0;
out_free:
free(fa);
free(fb);
*fields_a = *fields_b = NULL;
return -1;
}
static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
hpp_field_fn get_field, int idx)
{
struct evsel *evsel = hists_to_evsel(a->hists);
u64 *fields_a, *fields_b;
int cmp, nr_members, ret, i;
cmp = field_cmp(get_field(a), get_field(b));
if (!evsel__is_group_event(evsel))
return cmp;
nr_members = evsel->core.nr_members;
if (idx < 1 || idx >= nr_members)
return cmp;
ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
if (ret) {
ret = cmp;
goto out;
}
ret = field_cmp(fields_a[idx], fields_b[idx]);
if (ret)
goto out;
for (i = 1; i < nr_members; i++) {
if (i != idx) {
ret = field_cmp(fields_a[i], fields_b[i]);
if (ret)
goto out;
}
}
out:
free(fields_a);
free(fields_b);
return ret;
}
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
hpp_field_fn get_field)
{
s64 ret;
int i, nr_members;
struct evsel *evsel;
u64 *fields_a, *fields_b;
if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
return __hpp__group_sort_idx(a, b, get_field,
symbol_conf.group_sort_idx);
}
ret = field_cmp(get_field(a), get_field(b));
if (ret || !symbol_conf.event_group)
return ret;
evsel = hists_to_evsel(a->hists);
if (!evsel__is_group_event(evsel))
return ret;
nr_members = evsel->core.nr_members;
i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
if (i)
goto out;
for (i = 1; i < nr_members; i++) {
ret = field_cmp(fields_a[i], fields_b[i]);
if (ret)
break;
}
out:
free(fields_a);
free(fields_b);
return ret;
}
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
hpp_field_fn get_field)
{
s64 ret = 0;
if (symbol_conf.cumulate_callchain) {
/*
* Put caller above callee when they have equal period.
*/
ret = field_cmp(get_field(a), get_field(b));
if (ret)
return ret;
if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
(b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
!hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
return 0;
ret = b->callchain->max_depth - a->callchain->max_depth;
if (callchain_param.order == ORDER_CALLER)
ret = -ret;
}
return ret;
}
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct hists *hists)
{
int len = fmt->user_len ?: fmt->len;
struct evsel *evsel = hists_to_evsel(hists);
if (symbol_conf.event_group)
len = max(len, evsel->core.nr_members * fmt->len);
if (len < (int)strlen(fmt->name))
len = strlen(fmt->name);
return len;
}
static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hists *hists, int line __maybe_unused,
int *span __maybe_unused)
{
int len = hpp__width_fn(fmt, hpp, hists);
return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
va_list args;
ssize_t ssize = hpp->size;
double percent;
int ret, len;
va_start(args, fmt);
len = va_arg(args, int);
percent = va_arg(args, double);
ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
va_end(args);
return (ret >= ssize) ? (ssize - 1) : ret;
}
static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
va_list args;
ssize_t ssize = hpp->size;
int ret;
va_start(args, fmt);
ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
va_end(args);
return (ret >= ssize) ? (ssize - 1) : ret;
}
#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
return he->stat._field; \
} \
\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
hpp_color_scnprintf, true); \
}
#define __HPP_ENTRY_PERCENT_FN(_type, _field) \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
hpp_entry_scnprintf, true); \
}
#define __HPP_SORT_FN(_type, _field) \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct hist_entry *a, struct hist_entry *b) \
{ \
return __hpp__sort(a, b, he_get_##_field); \
}
#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
static u64 he_get_acc_##_field(struct hist_entry *he) \
{ \
return he->stat_acc->_field; \
} \
\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
hpp_color_scnprintf, true); \
}
#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
hpp_entry_scnprintf, true); \
}
#define __HPP_SORT_ACC_FN(_type, _field) \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct hist_entry *a, struct hist_entry *b) \
{ \
return __hpp__sort_acc(a, b, he_get_acc_##_field); \
}
#define __HPP_ENTRY_RAW_FN(_type, _field) \
static u64 he_get_raw_##_field(struct hist_entry *he) \
{ \
return he->stat._field; \
} \
\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \
hpp_entry_scnprintf, false); \
}
#define __HPP_SORT_RAW_FN(_type, _field) \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct hist_entry *a, struct hist_entry *b) \
{ \
return __hpp__sort(a, b, he_get_raw_##_field); \
}
#define HPP_PERCENT_FNS(_type, _field) \
__HPP_COLOR_PERCENT_FN(_type, _field) \
__HPP_ENTRY_PERCENT_FN(_type, _field) \
__HPP_SORT_FN(_type, _field)
#define HPP_PERCENT_ACC_FNS(_type, _field) \
__HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \
__HPP_SORT_ACC_FN(_type, _field)
#define HPP_RAW_FNS(_type, _field) \
__HPP_ENTRY_RAW_FN(_type, _field) \
__HPP_SORT_RAW_FN(_type, _field)
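/*
* Instantiate the accessor/print/sort helpers. For example,
* HPP_PERCENT_FNS(overhead, period) below expands to he_get_period(),
* hpp__color_overhead(), hpp__entry_overhead() and hpp__sort_overhead(),
* all operating on he->stat.period.
*/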
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)
HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)
static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *a __maybe_unused,
struct hist_entry *b __maybe_unused)
{
return 0;
}
static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
return a->header == hpp__header_fn;
}
static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
return false;
return a->idx == b->idx;
}
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx) \
{ \
.name = _name, \
.header = hpp__header_fn, \
.width = hpp__width_fn, \
.color = hpp__color_ ## _fn, \
.entry = hpp__entry_ ## _fn, \
.cmp = hpp__nop_cmp, \
.collapse = hpp__nop_cmp, \
.sort = hpp__sort_ ## _fn, \
.idx = PERF_HPP__ ## _idx, \
.equal = hpp__equal, \
}
#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx) \
{ \
.name = _name, \
.header = hpp__header_fn, \
.width = hpp__width_fn, \
.color = hpp__color_ ## _fn, \
.entry = hpp__entry_ ## _fn, \
.cmp = hpp__nop_cmp, \
.collapse = hpp__nop_cmp, \
.sort = hpp__sort_ ## _fn, \
.idx = PERF_HPP__ ## _idx, \
.equal = hpp__equal, \
}
#define HPP__PRINT_FNS(_name, _fn, _idx) \
{ \
.name = _name, \
.header = hpp__header_fn, \
.width = hpp__width_fn, \
.entry = hpp__entry_ ## _fn, \
.cmp = hpp__nop_cmp, \
.collapse = hpp__nop_cmp, \
.sort = hpp__sort_ ## _fn, \
.idx = PERF_HPP__ ## _idx, \
.equal = hpp__equal, \
}
struct perf_hpp_fmt perf_hpp__format[] = {
HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
HPP__PRINT_FNS("Samples", samples, SAMPLES),
HPP__PRINT_FNS("Period", period, PERIOD)
};
struct perf_hpp_list perf_hpp_list = {
.fields = LIST_HEAD_INIT(perf_hpp_list.fields),
.sorts = LIST_HEAD_INIT(perf_hpp_list.sorts),
.nr_header_lines = 1,
};
#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS
#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS
#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
static void fmt_free(struct perf_hpp_fmt *fmt)
{
/*
* At this point fmt should be completely
* unhooked, if not it's a bug.
*/
BUG_ON(!list_empty(&fmt->list));
BUG_ON(!list_empty(&fmt->sort_list));
if (fmt->free)
fmt->free(fmt);
}
void perf_hpp__init(void)
{
int i;
for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
struct perf_hpp_fmt *fmt = &perf_hpp__format[i];
INIT_LIST_HEAD(&fmt->list);
/* sort_list may be linked by setup_sorting() */
if (fmt->sort_list.next == NULL)
INIT_LIST_HEAD(&fmt->sort_list);
}
/*
* If user specified field order, no need to setup default fields.
*/
if (is_strict_order(field_order))
return;
if (symbol_conf.cumulate_callchain) {
hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
}
hpp_dimension__add_output(PERF_HPP__OVERHEAD);
if (symbol_conf.show_cpu_utilization) {
hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);
if (perf_guest) {
hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
}
}
if (symbol_conf.show_nr_samples)
hpp_dimension__add_output(PERF_HPP__SAMPLES);
if (symbol_conf.show_total_period)
hpp_dimension__add_output(PERF_HPP__PERIOD);
}
void perf_hpp_list__column_register(struct perf_hpp_list *list,
struct perf_hpp_fmt *format)
{
list_add_tail(&format->list, &list->fields);
}
void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
struct perf_hpp_fmt *format)
{
list_add_tail(&format->sort_list, &list->sorts);
}
void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
struct perf_hpp_fmt *format)
{
list_add(&format->sort_list, &list->sorts);
}
static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
list_del_init(&format->list);
fmt_free(format);
}
void perf_hpp__cancel_cumulate(void)
{
struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;
if (is_strict_order(field_order))
return;
ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];
perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
if (acc->equal(acc, fmt)) {
perf_hpp__column_unregister(fmt);
continue;
}
if (ovh->equal(ovh, fmt))
fmt->name = "Overhead";
}
}
static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
return a->equal && a->equal(a, b);
}
void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
struct perf_hpp_fmt *fmt;
/* append sort keys to output field */
perf_hpp_list__for_each_sort_list(list, fmt) {
struct perf_hpp_fmt *pos;
/* skip sort-only fields ("sort_compute" in perf diff) */
if (!fmt->entry && !fmt->color)
continue;
perf_hpp_list__for_each_format(list, pos) {
if (fmt_equal(fmt, pos))
goto next;
}
perf_hpp__column_register(fmt);
next:
continue;
}
}
void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
struct perf_hpp_fmt *fmt;
/* append output fields to sort keys */
perf_hpp_list__for_each_format(list, fmt) {
struct perf_hpp_fmt *pos;
perf_hpp_list__for_each_sort_list(list, pos) {
if (fmt_equal(fmt, pos))
goto next;
}
perf_hpp__register_sort_field(fmt);
next:
continue;
}
}
void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
struct perf_hpp_fmt *fmt, *tmp;
/* reset output fields */
perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
list_del_init(&fmt->list);
list_del_init(&fmt->sort_list);
fmt_free(fmt);
}
/* reset sort keys */
perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
list_del_init(&fmt->list);
list_del_init(&fmt->sort_list);
fmt_free(fmt);
}
}
/*
* See hists__fprintf to match the column widths
*/
unsigned int hists__sort_list_width(struct hists *hists)
{
struct perf_hpp_fmt *fmt;
int ret = 0;
bool first = true;
struct perf_hpp dummy_hpp;
hists__for_each_format(hists, fmt) {
if (perf_hpp__should_skip(fmt, hists))
continue;
if (first)
first = false;
else
ret += 2;
ret += fmt->width(fmt, &dummy_hpp, hists);
}
if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
ret += 3 + BITS_PER_LONG / 4;
return ret;
}
unsigned int hists__overhead_width(struct hists *hists)
{
struct perf_hpp_fmt *fmt;
int ret = 0;
bool first = true;
struct perf_hpp dummy_hpp;
hists__for_each_format(hists, fmt) {
if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
break;
if (first)
first = false;
else
ret += 2;
ret += fmt->width(fmt, &dummy_hpp, hists);
}
return ret;
}
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
if (perf_hpp__is_sort_entry(fmt))
return perf_hpp__reset_sort_width(fmt, hists);
if (perf_hpp__is_dynamic_entry(fmt))
return;
BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);
switch (fmt->idx) {
case PERF_HPP__OVERHEAD:
case PERF_HPP__OVERHEAD_SYS:
case PERF_HPP__OVERHEAD_US:
case PERF_HPP__OVERHEAD_ACC:
fmt->len = 8;
break;
case PERF_HPP__OVERHEAD_GUEST_SYS:
case PERF_HPP__OVERHEAD_GUEST_US:
fmt->len = 9;
break;
case PERF_HPP__SAMPLES:
case PERF_HPP__PERIOD:
fmt->len = 12;
break;
default:
break;
}
}
void hists__reset_column_width(struct hists *hists)
{
struct perf_hpp_fmt *fmt;
struct perf_hpp_list_node *node;
hists__for_each_format(hists, fmt)
perf_hpp__reset_width(fmt, hists);
/* hierarchy entries have their own hpp list */
list_for_each_entry(node, &hists->hpp_formats, list) {
perf_hpp_list__for_each_format(&node->hpp, fmt)
perf_hpp__reset_width(fmt, hists);
}
}
void perf_hpp__set_user_width(const char *width_list_str)
{
struct perf_hpp_fmt *fmt;
const char *ptr = width_list_str;
perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
char *p;
int len = strtol(ptr, &p, 10);
fmt->user_len = len;
if (*p == ',')
ptr = p + 1;
else
break;
}
}
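/*
* Example (illustrative): a width list such as "12,8", as accepted by
* 'perf report --column-widths', walks the output formats in order, so
* the first column gets user_len 12, the second 8, and parsing stops at
* the first number not followed by a comma.
*/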
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
struct perf_hpp_list_node *node = NULL;
struct perf_hpp_fmt *fmt_copy;
bool found = false;
bool skip = perf_hpp__should_skip(fmt, hists);
list_for_each_entry(node, &hists->hpp_formats, list) {
if (node->level == fmt->level) {
found = true;
break;
}
}
if (!found) {
node = malloc(sizeof(*node));
if (node == NULL)
return -1;
node->skip = skip;
node->level = fmt->level;
perf_hpp_list__init(&node->hpp);
hists->nr_hpp_node++;
list_add_tail(&node->list, &hists->hpp_formats);
}
fmt_copy = perf_hpp_fmt__dup(fmt);
if (fmt_copy == NULL)
return -1;
if (!skip)
node->skip = false;
list_add_tail(&fmt_copy->list, &node->hpp.fields);
list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);
return 0;
}
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
struct evlist *evlist)
{
struct evsel *evsel;
struct perf_hpp_fmt *fmt;
struct hists *hists;
int ret;
if (!symbol_conf.report_hierarchy)
return 0;
evlist__for_each_entry(evlist, evsel) {
hists = evsel__hists(evsel);
perf_hpp_list__for_each_sort_list(list, fmt) {
if (perf_hpp__is_dynamic_entry(fmt) &&
!perf_hpp__defined_dynamic_entry(fmt, hists))
continue;
ret = add_hierarchy_fmt(hists, fmt);
if (ret < 0)
return ret;
}
}
return 0;
}
| linux-master | tools/perf/ui/hist.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include "progress.h"
static void null_progress__update(struct ui_progress *p __maybe_unused)
{
}
static struct ui_progress_ops null_progress__ops = {
.update = null_progress__update,
};
struct ui_progress_ops *ui_progress__ops = &null_progress__ops;
void ui_progress__update(struct ui_progress *p, u64 adv)
{
u64 last = p->curr;
p->curr += adv;
if (p->curr >= p->next) {
u64 nr = DIV_ROUND_UP(p->curr - last, p->step);
p->next += nr * p->step;
ui_progress__ops->update(p);
}
}
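/*
* Usage sketch (hypothetical caller, assuming the ui_progress__init()
* wrapper from progress.h): a loop over 'total' records can advance the
* bar on every iteration, since the backend ->update() only fires when
* 'curr' crosses 'next':
*
*	struct ui_progress prog;
*
*	ui_progress__init(&prog, total, "Processing events");
*	for (i = 0; i < total; i++) {
*		process_one(i);		(a hypothetical helper)
*		ui_progress__update(&prog, 1);
*	}
*	ui_progress__finish();
*/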
void __ui_progress__init(struct ui_progress *p, u64 total,
const char *title, bool size)
{
p->curr = 0;
p->next = p->step = total / 16 ?: 1;
p->total = total;
p->title = title;
p->size = size;
if (ui_progress__ops->init)
ui_progress__ops->init(p);
}
void ui_progress__finish(void)
{
if (ui_progress__ops->finish)
ui_progress__ops->finish();
}
| linux-master | tools/perf/ui/progress.c |
// SPDX-License-Identifier: GPL-2.0
#include <signal.h>
#include <stdbool.h>
#include <string.h>
#include <stdlib.h>
#include <sys/ttydefaults.h>
#include "../browser.h"
#include "../keysyms.h"
#include "../helpline.h"
#include "../ui.h"
#include "../util.h"
#include "../libslang.h"
static void ui_browser__argv_write(struct ui_browser *browser,
void *entry, int row)
{
char **arg = entry;
bool current_entry = ui_browser__is_current_entry(browser, row);
ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
HE_COLORSET_NORMAL);
ui_browser__write_nstring(browser, *arg, browser->width);
}
static int popup_menu__run(struct ui_browser *menu, int *keyp)
{
int key;
if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
return -1;
while (1) {
key = ui_browser__run(menu, 0);
switch (key) {
case K_RIGHT:
case K_ENTER:
key = menu->index;
break;
case K_LEFT:
case K_ESC:
case 'q':
case CTRL('c'):
key = -1;
break;
default:
if (keyp) {
*keyp = key;
key = menu->nr_entries;
break;
}
continue;
}
break;
}
ui_browser__hide(menu);
return key;
}
int ui__popup_menu(int argc, char * const argv[], int *keyp)
{
struct ui_browser menu = {
.entries = (void *)argv,
.refresh = ui_browser__argv_refresh,
.seek = ui_browser__argv_seek,
.write = ui_browser__argv_write,
.nr_entries = argc,
};
return popup_menu__run(&menu, keyp);
}
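/*
* Usage sketch (hypothetical caller): present a small menu and act on
* the selection; the return value is the zero-based entry index on
* ENTER/'->', or negative when the menu is dismissed:
*
*	char * const opts[] = { "Annotate", "Zoom", "Exit" };
*	int choice = ui__popup_menu(ARRAY_SIZE(opts), opts, NULL);
*/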
int ui_browser__input_window(const char *title, const char *text, char *input,
const char *exit_msg, int delay_secs)
{
int x, y, len, key;
int max_len = 60, nr_lines = 0;
static char buf[50];
const char *t;
t = text;
while (1) {
const char *sep = strchr(t, '\n');
if (sep == NULL)
sep = strchr(t, '\0');
len = sep - t;
if (max_len < len)
max_len = len;
++nr_lines;
if (*sep == '\0')
break;
t = sep + 1;
}
mutex_lock(&ui__lock);
max_len += 2;
nr_lines += 8;
y = SLtt_Screen_Rows / 2 - nr_lines / 2;
x = SLtt_Screen_Cols / 2 - max_len / 2;
SLsmg_set_color(0);
SLsmg_draw_box(y, x++, nr_lines, max_len);
if (title) {
SLsmg_gotorc(y, x + 1);
SLsmg_write_string(title);
}
SLsmg_gotorc(++y, x);
nr_lines -= 7;
max_len -= 2;
SLsmg_write_wrapped_string((unsigned char *)text, y, x,
nr_lines, max_len, 1);
y += nr_lines;
len = 5;
while (len--) {
SLsmg_gotorc(y + len - 1, x);
SLsmg_write_nstring(" ", max_len);
}
SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
SLsmg_gotorc(y + 3, x);
SLsmg_write_nstring(exit_msg, max_len);
SLsmg_refresh();
mutex_unlock(&ui__lock);
x += 2;
len = 0;
key = ui__getch(delay_secs);
while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
mutex_lock(&ui__lock);
if (key == K_BKSPC) {
if (len == 0) {
mutex_unlock(&ui__lock);
goto next_key;
}
SLsmg_gotorc(y, x + --len);
SLsmg_write_char(' ');
} else {
buf[len] = key;
SLsmg_gotorc(y, x + len++);
SLsmg_write_char(key);
}
SLsmg_refresh();
mutex_unlock(&ui__lock);
/* XXX more graceful overflow handling needed */
if (len == sizeof(buf) - 1) {
ui_helpline__push("maximum size of symbol name reached!");
key = K_ENTER;
break;
}
next_key:
key = ui__getch(delay_secs);
}
buf[len] = '\0';
strncpy(input, buf, len+1);
return key;
}
void __ui__info_window(const char *title, const char *text, const char *exit_msg)
{
int x, y;
int max_len = 0, nr_lines = 0;
const char *t;
t = text;
while (1) {
const char *sep = strchr(t, '\n');
int len;
if (sep == NULL)
sep = strchr(t, '\0');
len = sep - t;
if (max_len < len)
max_len = len;
++nr_lines;
if (*sep == '\0')
break;
t = sep + 1;
}
max_len += 2;
nr_lines += 2;
if (exit_msg)
nr_lines += 2;
y = SLtt_Screen_Rows / 2 - nr_lines / 2;
x = SLtt_Screen_Cols / 2 - max_len / 2;
SLsmg_set_color(0);
SLsmg_draw_box(y, x++, nr_lines, max_len);
if (title) {
SLsmg_gotorc(y, x + 1);
SLsmg_write_string(title);
}
SLsmg_gotorc(++y, x);
if (exit_msg)
nr_lines -= 2;
max_len -= 2;
SLsmg_write_wrapped_string((unsigned char *)text, y, x,
nr_lines, max_len, 1);
if (exit_msg) {
SLsmg_gotorc(y + nr_lines - 2, x);
SLsmg_write_nstring(" ", max_len);
SLsmg_gotorc(y + nr_lines - 1, x);
SLsmg_write_nstring(exit_msg, max_len);
}
}
void ui__info_window(const char *title, const char *text)
{
mutex_lock(&ui__lock);
__ui__info_window(title, text, NULL);
SLsmg_refresh();
mutex_unlock(&ui__lock);
}
int ui__question_window(const char *title, const char *text,
const char *exit_msg, int delay_secs)
{
mutex_lock(&ui__lock);
__ui__info_window(title, text, exit_msg);
SLsmg_refresh();
mutex_unlock(&ui__lock);
return ui__getch(delay_secs);
}
int ui__help_window(const char *text)
{
return ui__question_window("Help", text, "Press any key...", 0);
}
int ui__dialog_yesno(const char *msg)
{
return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
}
static int __ui__warning(const char *title, const char *format, va_list args)
{
char *s;
if (vasprintf(&s, format, args) > 0) {
int key;
key = ui__question_window(title, s, "Press any key...", 0);
free(s);
return key;
}
fprintf(stderr, "%s\n", title);
vfprintf(stderr, format, args);
return K_ESC;
}
static int perf_tui__error(const char *format, va_list args)
{
return __ui__warning("Error:", format, args);
}
static int perf_tui__warning(const char *format, va_list args)
{
return __ui__warning("Warning:", format, args);
}
struct perf_error_ops perf_tui_eops = {
.error = perf_tui__error,
.warning = perf_tui__warning,
};
| linux-master | tools/perf/ui/tui/util.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "../helpline.h"
#include "../ui.h"
#include "../libslang.h"
char ui_helpline__last_msg[1024];
bool tui_helpline__set;
static void tui_helpline__pop(void)
{
}
static void tui_helpline__push(const char *msg)
{
const size_t sz = sizeof(ui_helpline__current);
SLsmg_gotorc(SLtt_Screen_Rows - 1, 0);
SLsmg_set_color(0);
SLsmg_write_nstring(msg, SLtt_Screen_Cols);
SLsmg_refresh();
strlcpy(ui_helpline__current, msg, sz);
}
static int tui_helpline__show(const char *format, va_list ap)
{
int ret;
static int backlog;
mutex_lock(&ui__lock);
ret = vscnprintf(ui_helpline__last_msg + backlog,
sizeof(ui_helpline__last_msg) - backlog, format, ap);
backlog += ret;
tui_helpline__set = true;
if (ui_helpline__last_msg[backlog - 1] == '\n') {
ui_helpline__puts(ui_helpline__last_msg);
SLsmg_refresh();
backlog = 0;
}
mutex_unlock(&ui__lock);
return ret;
}
struct ui_helpline tui_helpline_fns = {
.pop = tui_helpline__pop,
.push = tui_helpline__push,
.show = tui_helpline__show,
};
void ui_helpline__init(void)
{
helpline_fns = &tui_helpline_fns;
ui_helpline__puts(" ");
}
| linux-master | tools/perf/ui/tui/helpline.c |
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/kernel.h>
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif
#include "../../util/debug.h"
#include "../browser.h"
#include "../helpline.h"
#include "../ui.h"
#include "../util.h"
#include "../libslang.h"
#include "../keysyms.h"
#include "tui.h"
static volatile int ui__need_resize;
extern struct perf_error_ops perf_tui_eops;
extern bool tui_helpline__set;
extern void hist_browser__init_hpp(void);
void ui__refresh_dimensions(bool force)
{
if (force || ui__need_resize) {
ui__need_resize = 0;
mutex_lock(&ui__lock);
SLtt_get_screen_size();
SLsmg_reinit_smg();
mutex_unlock(&ui__lock);
}
}
static void ui__sigwinch(int sig __maybe_unused)
{
ui__need_resize = 1;
}
static void ui__setup_sigwinch(void)
{
static bool done;
if (done)
return;
done = true;
pthread__unblock_sigwinch();
signal(SIGWINCH, ui__sigwinch);
}
int ui__getch(int delay_secs)
{
struct timeval timeout, *ptimeout = delay_secs ? &timeout : NULL;
fd_set read_set;
int err, key;
ui__setup_sigwinch();
FD_ZERO(&read_set);
FD_SET(0, &read_set);
if (delay_secs) {
timeout.tv_sec = delay_secs;
timeout.tv_usec = 0;
}
err = select(1, &read_set, NULL, NULL, ptimeout);
if (err == 0)
return K_TIMER;
if (err == -1) {
if (errno == EINTR)
return K_RESIZE;
return K_ERROR;
}
key = SLang_getkey();
if (key != K_ESC)
return key;
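/*
* A lone ESC must be told apart from an escape *sequence* (arrow keys
* etc.): poll stdin very briefly and, if nothing else arrived, report
* K_ESC; otherwise push the byte back and let SLkp_getkey() decode the
* full sequence.
*/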
FD_ZERO(&read_set);
FD_SET(0, &read_set);
timeout.tv_sec = 0;
timeout.tv_usec = 20;
err = select(1, &read_set, NULL, NULL, &timeout);
if (err == 0)
return K_ESC;
SLang_ungetkey(key);
return SLkp_getkey();
}
#ifdef HAVE_BACKTRACE_SUPPORT
static void ui__signal_backtrace(int sig)
{
void *stackdump[32];
size_t size;
ui__exit(false);
psignal(sig, "perf");
printf("-------- backtrace --------\n");
size = backtrace(stackdump, ARRAY_SIZE(stackdump));
backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
exit(0);
}
#else
# define ui__signal_backtrace ui__signal
#endif
static void ui__signal(int sig)
{
ui__exit(false);
psignal(sig, "perf");
exit(0);
}
int ui__init(void)
{
int err;
SLutf8_enable(-1);
SLtt_get_terminfo();
SLtt_get_screen_size();
err = SLsmg_init_smg();
if (err < 0)
goto out;
err = SLang_init_tty(-1, 0, 0);
if (err < 0)
goto out;
err = SLkp_init();
if (err < 0) {
pr_err("TUI initialization failed.\n");
goto out;
}
SLkp_define_keysym("^(kB)", SL_KEY_UNTAB);
signal(SIGSEGV, ui__signal_backtrace);
signal(SIGFPE, ui__signal_backtrace);
signal(SIGINT, ui__signal);
signal(SIGQUIT, ui__signal);
signal(SIGTERM, ui__signal);
perf_error__register(&perf_tui_eops);
ui_helpline__init();
ui_browser__init();
tui_progress__init();
hist_browser__init_hpp();
out:
return err;
}
void ui__exit(bool wait_for_ok)
{
if (wait_for_ok && tui_helpline__set)
ui__question_window("Fatal Error",
ui_helpline__last_msg,
"Press any key...", 0);
SLtt_set_cursor_visibility(1);
if (mutex_trylock(&ui__lock)) {
SLsmg_refresh();
SLsmg_reset_smg();
mutex_unlock(&ui__lock);
}
SLang_reset_tty();
perf_error__unregister(&perf_tui_eops);
}
| linux-master | tools/perf/ui/tui/setup.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include "../progress.h"
#include "../libslang.h"
#include "../ui.h"
#include "tui.h"
#include "units.h"
#include "../browser.h"
static void __tui_progress__init(struct ui_progress *p)
{
p->next = p->step = p->total / (SLtt_Screen_Cols - 2) ?: 1;
}
static int get_title(struct ui_progress *p, char *buf, size_t size)
{
char buf_cur[20];
char buf_tot[20];
int ret;
ret = unit_number__scnprintf(buf_cur, sizeof(buf_cur), p->curr);
ret += unit_number__scnprintf(buf_tot, sizeof(buf_tot), p->total);
return ret + scnprintf(buf, size, "%s [%s/%s]",
p->title, buf_cur, buf_tot);
}
static void tui_progress__update(struct ui_progress *p)
{
char buf[100], *title = (char *) p->title;
int bar, y;
/*
* FIXME: We should have a per UI backend way of showing progress,
* stdio will just show a percentage as NN%, etc.
*/
if (use_browser <= 0)
return;
if (p->total == 0)
return;
if (p->size) {
get_title(p, buf, sizeof(buf));
title = buf;
}
ui__refresh_dimensions(false);
mutex_lock(&ui__lock);
y = SLtt_Screen_Rows / 2 - 2;
SLsmg_set_color(0);
SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols);
SLsmg_gotorc(y++, 1);
SLsmg_write_string(title);
SLsmg_fill_region(y, 1, 1, SLtt_Screen_Cols - 2, ' ');
SLsmg_set_color(HE_COLORSET_SELECTED);
bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total;
SLsmg_fill_region(y, 1, 1, bar, ' ');
SLsmg_refresh();
mutex_unlock(&ui__lock);
}
static void tui_progress__finish(void)
{
int y;
if (use_browser <= 0)
return;
ui__refresh_dimensions(false);
mutex_lock(&ui__lock);
y = SLtt_Screen_Rows / 2 - 2;
SLsmg_set_color(0);
SLsmg_fill_region(y, 0, 3, SLtt_Screen_Cols, ' ');
SLsmg_refresh();
mutex_unlock(&ui__lock);
}
static struct ui_progress_ops tui_progress__ops = {
.init = __tui_progress__init,
.update = tui_progress__update,
.finish = tui_progress__finish,
};
void tui_progress__init(void)
{
ui_progress__ops = &tui_progress__ops;
}
| linux-master | tools/perf/ui/tui/progress.c |
// SPDX-License-Identifier: GPL-2.0
#include "../../builtin.h"
#include "../../perf.h"
#include "../../util/util.h" // perf_exe()
#include "../util.h"
#include "../../util/hist.h"
#include "../../util/debug.h"
#include "../../util/symbol.h"
#include "../browser.h"
#include "../libslang.h"
#include "config.h"
#include <linux/string.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#define SCRIPT_NAMELEN 128
#define SCRIPT_MAX_NO 64
/*
* Usually the full path for a script is:
* /home/username/libexec/perf-core/scripts/python/xxx.py
* /home/username/libexec/perf-core/scripts/perl/xxx.pl
* So 256 should be long enough to contain the full path.
*/
#define SCRIPT_FULLPATH_LEN 256
struct script_config {
const char **names;
char **paths;
int index;
const char *perf;
char extra_format[256];
};
void attr_to_script(char *extra_format, struct perf_event_attr *attr)
{
extra_format[0] = 0;
if (attr->read_format & PERF_FORMAT_GROUP)
strcat(extra_format, " -F +metric");
if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK)
strcat(extra_format, " -F +brstackinsn --xed");
if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
strcat(extra_format, " -F +iregs");
if (attr->sample_type & PERF_SAMPLE_REGS_USER)
strcat(extra_format, " -F +uregs");
if (attr->sample_type & PERF_SAMPLE_PHYS_ADDR)
strcat(extra_format, " -F +phys_addr");
}
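/*
* Example (illustrative): for an event recorded with both
* PERF_SAMPLE_BRANCH_STACK and PERF_SAMPLE_PHYS_ADDR set, the function
* above leaves extra_format as:
*
*	" -F +brstackinsn --xed -F +phys_addr"
*/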
static int add_script_option(const char *name, const char *opt,
struct script_config *c)
{
c->names[c->index] = name;
if (asprintf(&c->paths[c->index],
"%s script %s -F +metric %s %s",
c->perf, opt, symbol_conf.inline_name ? " --inline" : "",
c->extra_format) < 0)
return -1;
c->index++;
return 0;
}
static int scripts_config(const char *var, const char *value, void *data)
{
struct script_config *c = data;
if (!strstarts(var, "scripts."))
return -1;
if (c->index >= SCRIPT_MAX_NO)
return -1;
c->names[c->index] = strdup(var + 8); /* skip the "scripts." prefix */
if (!c->names[c->index])
return -1;
if (asprintf(&c->paths[c->index], "%s %s", value,
c->extra_format) < 0)
return -1;
c->index++;
return 0;
}
/*
* On success, copy the full path of the selected script into the
* buffer pointed to by script_name and return 0.
* Return -1 on failure.
*/
static int list_scripts(char *script_name, bool *custom,
struct evsel *evsel)
{
char *buf, *paths[SCRIPT_MAX_NO], *names[SCRIPT_MAX_NO];
int i, num, choice;
int ret = 0;
int max_std, custom_perf;
char pbuf[256];
const char *perf = perf_exe(pbuf, sizeof(pbuf));
struct script_config scriptc = {
.names = (const char **)names,
.paths = paths,
.perf = perf
};
script_name[0] = 0;
/* One SCRIPT_NAMELEN name slot plus one SCRIPT_FULLPATH_LEN path slot per script */
buf = malloc(SCRIPT_MAX_NO * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN));
if (!buf)
return -1;
if (evsel)
attr_to_script(scriptc.extra_format, &evsel->core.attr);
add_script_option("Show individual samples", "", &scriptc);
add_script_option("Show individual samples with assembler", "-F +insn --xed",
&scriptc);
add_script_option("Show individual samples with source", "-F +srcline,+srccode",
&scriptc);
perf_config(scripts_config, &scriptc);
custom_perf = scriptc.index;
add_script_option("Show samples with custom perf script arguments", "", &scriptc);
i = scriptc.index;
max_std = i;
for (; i < SCRIPT_MAX_NO; i++) {
names[i] = buf + (i - max_std) * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
paths[i] = names[i] + SCRIPT_NAMELEN;
}
num = find_scripts(names + max_std, paths + max_std, SCRIPT_MAX_NO - max_std,
SCRIPT_FULLPATH_LEN);
if (num < 0)
num = 0;
choice = ui__popup_menu(num + max_std, (char * const *)names, NULL);
if (choice < 0) {
ret = -1;
goto out;
}
if (choice == custom_perf) {
char script_args[50];
int key = ui_browser__input_window("perf script command",
"Enter perf script command line (without perf script prefix)",
script_args, "", 0);
if (key != K_ENTER) {
ret = -1;
goto out;
}
sprintf(script_name, "%s script %s", perf, script_args);
} else if (choice < num + max_std) {
strcpy(script_name, paths[choice]);
}
*custom = choice >= max_std;
out:
free(buf);
for (i = 0; i < max_std; i++)
zfree(&paths[i]);
return ret;
}
void run_script(char *cmd)
{
pr_debug("Running %s\n", cmd);
SLang_reset_tty();
if (system(cmd) < 0)
pr_warning("Cannot run %s\n", cmd);
/*
* SLang doesn't seem to reset the whole terminal, so be more
* forceful to get back to the original state.
*/
printf("\033[c\033[H\033[J");
fflush(stdout);
SLang_init_tty(0, 0, 0);
SLsmg_refresh();
}
int script_browse(const char *script_opt, struct evsel *evsel)
{
char *cmd, script_name[SCRIPT_FULLPATH_LEN];
bool custom = false;
memset(script_name, 0, SCRIPT_FULLPATH_LEN);
if (list_scripts(script_name, &custom, evsel))
return -1;
if (asprintf(&cmd, "%s%s %s %s%s 2>&1 | less",
custom ? "perf script -s " : "",
script_name,
script_opt ? script_opt : "",
input_name ? "-i " : "",
input_name ? input_name : "") < 0)
return -1;
run_script(cmd);
free(cmd);
return 0;
}
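/*
* Example (illustrative): picking the builtin "Show individual samples"
* entry with an input file ends up running something along the lines of
*
*	perf script  -F +metric   -i perf.data 2>&1 | less
*
* through run_script() above.
*/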
| linux-master | tools/perf/ui/browsers/scripts.c |
// SPDX-License-Identifier: GPL-2.0
#include "../browser.h"
#include "../helpline.h"
#include "../ui.h"
#include "../../util/annotate.h"
#include "../../util/debug.h"
#include "../../util/dso.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/map.h"
#include "../../util/mutex.h"
#include "../../util/symbol.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <sys/ttydefaults.h>
#include <asm/bug.h>
struct arch;
struct annotate_browser {
struct ui_browser b;
struct rb_root entries;
struct rb_node *curr_hot;
struct annotation_line *selection;
struct arch *arch;
struct annotation_options *opts;
bool searching_backwards;
char search_bf[128];
};
static inline struct annotation *browser__annotation(struct ui_browser *browser)
{
struct map_symbol *ms = browser->priv;
return symbol__annotation(ms->sym);
}
static bool disasm_line__filter(struct ui_browser *browser, void *entry)
{
struct annotation *notes = browser__annotation(browser);
struct annotation_line *al = list_entry(entry, struct annotation_line, node);
return annotation_line__filter(al, notes);
}
static int ui_browser__jumps_percent_color(struct ui_browser *browser, int nr, bool current)
{
struct annotation *notes = browser__annotation(browser);
if (current && (!browser->use_navkeypressed || browser->navkeypressed))
return HE_COLORSET_SELECTED;
if (nr == notes->max_jump_sources)
return HE_COLORSET_TOP;
if (nr > 1)
return HE_COLORSET_MEDIUM;
return HE_COLORSET_NORMAL;
}
static int ui_browser__set_jumps_percent_color(void *browser, int nr, bool current)
{
int color = ui_browser__jumps_percent_color(browser, nr, current);
return ui_browser__set_color(browser, color);
}
static int annotate_browser__set_color(void *browser, int color)
{
return ui_browser__set_color(browser, color);
}
static void annotate_browser__write_graph(void *browser, int graph)
{
ui_browser__write_graph(browser, graph);
}
static void annotate_browser__set_percent_color(void *browser, double percent, bool current)
{
ui_browser__set_percent_color(browser, percent, current);
}
static void annotate_browser__printf(void *browser, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
ui_browser__vprintf(browser, fmt, args);
va_end(args);
}
static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
struct annotation *notes = browser__annotation(browser);
struct annotation_line *al = list_entry(entry, struct annotation_line, node);
const bool is_current_entry = ui_browser__is_current_entry(browser, row);
struct annotation_write_ops ops = {
.first_line = row == 0,
.current_entry = is_current_entry,
.change_color = (!notes->options->hide_src_code &&
(!is_current_entry ||
(browser->use_navkeypressed &&
!browser->navkeypressed))),
.width = browser->width,
.obj = browser,
.set_color = annotate_browser__set_color,
.set_percent_color = annotate_browser__set_percent_color,
.set_jumps_percent_color = ui_browser__set_jumps_percent_color,
.printf = annotate_browser__printf,
.write_graph = annotate_browser__write_graph,
};
/* The scroll bar isn't being used */
if (!browser->navkeypressed)
ops.width += 1;
annotation_line__write(al, notes, &ops, ab->opts);
if (ops.current_entry)
ab->selection = al;
}
static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
{
struct disasm_line *pos = list_prev_entry(cursor, al.node);
const char *name;
int diff = 1;
while (pos && pos->al.offset == -1) {
pos = list_prev_entry(pos, al.node);
if (!ab->opts->hide_src_code)
diff++;
}
if (!pos)
return 0;
if (ins__is_lock(&pos->ins))
name = pos->ops.locked.ins.name;
else
name = pos->ins.name;
if (!name || !cursor->ins.name)
return 0;
if (ins__is_fused(ab->arch, name, cursor->ins.name))
return diff;
return 0;
}
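/*
* Background (illustrative): on x86, a cmp or test can macro-fuse with
* the conditional jump that follows it, e.g.
*
*	cmp	%rax,%rbx
*	jne	4005d0
*
* is_fused() returns how many displayed rows above the jump the fused
* instruction sits (accounting for interleaved source lines), so that
* ui_browser__mark_fused() can mark both lines.
*/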
static void annotate_browser__draw_current_jump(struct ui_browser *browser)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
struct disasm_line *cursor = disasm_line(ab->selection);
struct annotation_line *target;
unsigned int from, to;
struct map_symbol *ms = ab->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
u8 pcnt_width = annotation__pcnt_width(notes);
int width;
int diff = 0;
/* PLT symbols contain external offsets */
if (strstr(sym->name, "@plt"))
return;
if (!disasm_line__is_valid_local_jump(cursor, sym))
return;
/*
* This first was seen with a gcc function, _cpp_lex_token, that
* has the usual jumps:
*
* │1159e6c: ↓ jne 115aa32 <_cpp_lex_token@@Base+0xf92>
*
* I.e. jumps to a label inside that function (_cpp_lex_token), and
* those works, but also this kind:
*
* │1159e8b: ↓ jne c469be <cpp_named_operator2name@@Base+0xa72>
*
* I.e. jumps to another function, outside _cpp_lex_token. Those are
* not handled correctly and, as a side effect, generate references to
* notes->offsets[] entries that are set to NULL, so to make this code
* more robust, check that here.
*
* A proper fix will be put in place later, looking at the function
* name right after the '<' token and probably treating this like a
* 'call' instruction.
*/
target = notes->offsets[cursor->ops.target.offset];
if (target == NULL) {
ui_helpline__printf("WARN: jump target inconsistency, press 'o', notes->offsets[%#x] = NULL\n",
cursor->ops.target.offset);
return;
}
if (notes->options->hide_src_code) {
from = cursor->al.idx_asm;
to = target->idx_asm;
} else {
from = (u64)cursor->al.idx;
to = (u64)target->idx;
}
width = annotation__cycles_width(notes);
ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS);
__ui_browser__line_arrow(browser,
pcnt_width + 2 + notes->widths.addr + width,
from, to);
diff = is_fused(ab, cursor);
if (diff > 0) {
ui_browser__mark_fused(browser,
pcnt_width + 3 + notes->widths.addr + width,
from - diff, diff, to > from);
}
}
static unsigned int annotate_browser__refresh(struct ui_browser *browser)
{
struct annotation *notes = browser__annotation(browser);
int ret = ui_browser__list_head_refresh(browser);
int pcnt_width = annotation__pcnt_width(notes);
if (notes->options->jump_arrows)
annotate_browser__draw_current_jump(browser);
ui_browser__set_color(browser, HE_COLORSET_NORMAL);
__ui_browser__vline(browser, pcnt_width, 0, browser->rows - 1);
return ret;
}
static double disasm__cmp(struct annotation_line *a, struct annotation_line *b,
int percent_type)
{
int i;
for (i = 0; i < a->data_nr; i++) {
if (a->data[i].percent[percent_type] == b->data[i].percent[percent_type])
continue;
return a->data[i].percent[percent_type] -
b->data[i].percent[percent_type];
}
return 0;
}
static void disasm_rb_tree__insert(struct annotate_browser *browser,
struct annotation_line *al)
{
struct rb_root *root = &browser->entries;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct annotation_line *l;
while (*p != NULL) {
parent = *p;
l = rb_entry(parent, struct annotation_line, rb_node);
if (disasm__cmp(al, l, browser->opts->percent_type) < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&al->rb_node, parent, p);
rb_insert_color(&al->rb_node, root);
}
static void annotate_browser__set_top(struct annotate_browser *browser,
struct annotation_line *pos, u32 idx)
{
struct annotation *notes = browser__annotation(&browser->b);
unsigned back;
ui_browser__refresh_dimensions(&browser->b);
back = browser->b.height / 2;
browser->b.top_idx = browser->b.index = idx;
while (browser->b.top_idx != 0 && back != 0) {
pos = list_entry(pos->node.prev, struct annotation_line, node);
if (annotation_line__filter(pos, notes))
continue;
--browser->b.top_idx;
--back;
}
browser->b.top = pos;
browser->b.navkeypressed = true;
}
static void annotate_browser__set_rb_top(struct annotate_browser *browser,
struct rb_node *nd)
{
struct annotation *notes = browser__annotation(&browser->b);
struct annotation_line * pos = rb_entry(nd, struct annotation_line, rb_node);
u32 idx = pos->idx;
if (notes->options->hide_src_code)
idx = pos->idx_asm;
annotate_browser__set_top(browser, pos, idx);
browser->curr_hot = nd;
}
static void annotate_browser__calc_percent(struct annotate_browser *browser,
struct evsel *evsel)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *pos;
browser->entries = RB_ROOT;
annotation__lock(notes);
symbol__calc_percent(sym, evsel);
list_for_each_entry(pos, ¬es->src->source, al.node) {
double max_percent = 0.0;
int i;
if (pos->al.offset == -1) {
RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
for (i = 0; i < pos->al.data_nr; i++) {
double percent;
percent = annotation_data__percent(&pos->al.data[i],
browser->opts->percent_type);
if (max_percent < percent)
max_percent = percent;
}
if (max_percent < 0.01 && pos->al.ipc == 0) {
RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
disasm_rb_tree__insert(browser, &pos->al);
}
annotation__unlock(notes);
browser->curr_hot = rb_last(&browser->entries);
}
static struct annotation_line *annotate_browser__find_next_asm_line(
struct annotate_browser *browser,
struct annotation_line *al)
{
struct annotation_line *it = al;
/* find next asm line */
list_for_each_entry_continue(it, browser->b.entries, node) {
if (it->idx_asm >= 0)
return it;
}
/* no asm line found forwards, try backwards */
it = al;
list_for_each_entry_continue_reverse(it, browser->b.entries, node) {
if (it->idx_asm >= 0)
return it;
}
/* There are no asm lines */
return NULL;
}
static bool annotate_browser__toggle_source(struct annotate_browser *browser)
{
struct annotation *notes = browser__annotation(&browser->b);
struct annotation_line *al;
off_t offset = browser->b.index - browser->b.top_idx;
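/*
* 'offset' is how far the cursor sits below the first visible line; it
* is preserved across the mode switch so the selected line stays put on
* screen while the indexes are remapped between the full (idx) and
* asm-only (idx_asm) numberings.
*/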
browser->b.seek(&browser->b, offset, SEEK_CUR);
al = list_entry(browser->b.top, struct annotation_line, node);
if (notes->options->hide_src_code) {
if (al->idx_asm < offset)
offset = al->idx;
browser->b.nr_entries = notes->nr_entries;
notes->options->hide_src_code = false;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
browser->b.top_idx = al->idx - offset;
browser->b.index = al->idx;
} else {
if (al->idx_asm < 0) {
/* move cursor to next asm line */
al = annotate_browser__find_next_asm_line(browser, al);
if (!al) {
browser->b.seek(&browser->b, -offset, SEEK_CUR);
return false;
}
}
if (al->idx_asm < offset)
offset = al->idx_asm;
browser->b.nr_entries = notes->nr_asm_entries;
notes->options->hide_src_code = true;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
browser->b.top_idx = al->idx_asm - offset;
browser->b.index = al->idx_asm;
}
return true;
}
#define SYM_TITLE_MAX_SIZE (PATH_MAX + 64)
static void annotate_browser__show_full_location(struct ui_browser *browser)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
struct disasm_line *cursor = disasm_line(ab->selection);
struct annotation_line *al = &cursor->al;
if (al->offset != -1)
ui_helpline__puts("Only available for source code lines.");
else if (al->fileloc == NULL)
ui_helpline__puts("No source file location.");
else {
char help_line[SYM_TITLE_MAX_SIZE];
		sprintf(help_line, "Source file location: %s", al->fileloc);
ui_helpline__puts(help_line);
}
}
static void ui_browser__init_asm_mode(struct ui_browser *browser)
{
struct annotation *notes = browser__annotation(browser);
ui_browser__reset_index(browser);
browser->nr_entries = notes->nr_asm_entries;
}
static int sym_title(struct symbol *sym, struct map *map, char *title,
size_t sz, int percent_type)
{
return snprintf(title, sz, "%s %s [Percent: %s]", sym->name,
map__dso(map)->long_name,
percent_type_str(percent_type));
}
/*
* This can be called from external jumps, i.e. jumps from one function
* to another, like from the kernel's entry_SYSCALL_64 function to the
* swapgs_restore_regs_and_return_to_usermode() function.
*
 * So all we check here is that dl->ops.target.sym is set; if it is, just
 * go to that function and, when exiting from its disassembly, come back
 * to the calling function.
*/
static bool annotate_browser__callq(struct annotate_browser *browser,
struct evsel *evsel,
struct hist_browser_timer *hbt)
{
struct map_symbol *ms = browser->b.priv, target_ms;
struct disasm_line *dl = disasm_line(browser->selection);
struct annotation *notes;
char title[SYM_TITLE_MAX_SIZE];
if (!dl->ops.target.sym) {
ui_helpline__puts("The called function was not found.");
return true;
}
notes = symbol__annotation(dl->ops.target.sym);
annotation__lock(notes);
if (!symbol__hists(dl->ops.target.sym, evsel->evlist->core.nr_entries)) {
annotation__unlock(notes);
ui__warning("Not enough memory for annotating '%s' symbol!\n",
dl->ops.target.sym->name);
return true;
}
target_ms.maps = ms->maps;
target_ms.map = ms->map;
target_ms.sym = dl->ops.target.sym;
annotation__unlock(notes);
symbol__tui_annotate(&target_ms, evsel, hbt, browser->opts);
sym_title(ms->sym, ms->map, title, sizeof(title), browser->opts->percent_type);
ui_browser__show_title(&browser->b, title);
return true;
}
static
struct disasm_line *annotate_browser__find_offset(struct annotate_browser *browser,
s64 offset, s64 *idx)
{
struct annotation *notes = browser__annotation(&browser->b);
struct disasm_line *pos;
*idx = 0;
	list_for_each_entry(pos, &notes->src->source, al.node) {
if (pos->al.offset == offset)
return pos;
if (!annotation_line__filter(&pos->al, notes))
++*idx;
}
return NULL;
}
static bool annotate_browser__jump(struct annotate_browser *browser,
struct evsel *evsel,
struct hist_browser_timer *hbt)
{
struct disasm_line *dl = disasm_line(browser->selection);
u64 offset;
s64 idx;
if (!ins__is_jump(&dl->ins))
return false;
if (dl->ops.target.outside) {
annotate_browser__callq(browser, evsel, hbt);
return true;
}
offset = dl->ops.target.offset;
dl = annotate_browser__find_offset(browser, offset, &idx);
if (dl == NULL) {
ui_helpline__printf("Invalid jump offset: %" PRIx64, offset);
return true;
}
annotate_browser__set_top(browser, &dl->al, idx);
return true;
}
static
struct annotation_line *annotate_browser__find_string(struct annotate_browser *browser,
char *s, s64 *idx)
{
struct annotation *notes = browser__annotation(&browser->b);
struct annotation_line *al = browser->selection;
*idx = browser->b.index;
	list_for_each_entry_continue(al, &notes->src->source, node) {
if (annotation_line__filter(al, notes))
continue;
++*idx;
if (al->line && strstr(al->line, s) != NULL)
return al;
}
return NULL;
}
static bool __annotate_browser__search(struct annotate_browser *browser)
{
struct annotation_line *al;
s64 idx;
al = annotate_browser__find_string(browser, browser->search_bf, &idx);
if (al == NULL) {
ui_helpline__puts("String not found!");
return false;
}
annotate_browser__set_top(browser, al, idx);
browser->searching_backwards = false;
return true;
}
static
struct annotation_line *annotate_browser__find_string_reverse(struct annotate_browser *browser,
char *s, s64 *idx)
{
struct annotation *notes = browser__annotation(&browser->b);
struct annotation_line *al = browser->selection;
*idx = browser->b.index;
	list_for_each_entry_continue_reverse(al, &notes->src->source, node) {
if (annotation_line__filter(al, notes))
continue;
--*idx;
if (al->line && strstr(al->line, s) != NULL)
return al;
}
return NULL;
}
static bool __annotate_browser__search_reverse(struct annotate_browser *browser)
{
struct annotation_line *al;
s64 idx;
al = annotate_browser__find_string_reverse(browser, browser->search_bf, &idx);
if (al == NULL) {
ui_helpline__puts("String not found!");
return false;
}
annotate_browser__set_top(browser, al, idx);
browser->searching_backwards = true;
return true;
}
static bool annotate_browser__search_window(struct annotate_browser *browser,
int delay_secs)
{
if (ui_browser__input_window("Search", "String: ", browser->search_bf,
"ENTER: OK, ESC: Cancel",
delay_secs * 2) != K_ENTER ||
!*browser->search_bf)
return false;
return true;
}
static bool annotate_browser__search(struct annotate_browser *browser, int delay_secs)
{
if (annotate_browser__search_window(browser, delay_secs))
return __annotate_browser__search(browser);
return false;
}
static bool annotate_browser__continue_search(struct annotate_browser *browser,
int delay_secs)
{
if (!*browser->search_bf)
return annotate_browser__search(browser, delay_secs);
return __annotate_browser__search(browser);
}
static bool annotate_browser__search_reverse(struct annotate_browser *browser,
int delay_secs)
{
if (annotate_browser__search_window(browser, delay_secs))
return __annotate_browser__search_reverse(browser);
return false;
}
static
bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
int delay_secs)
{
if (!*browser->search_bf)
return annotate_browser__search_reverse(browser, delay_secs);
return __annotate_browser__search_reverse(browser);
}
static int annotate_browser__show(struct ui_browser *browser, char *title, const char *help)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
struct map_symbol *ms = browser->priv;
struct symbol *sym = ms->sym;
char symbol_dso[SYM_TITLE_MAX_SIZE];
if (ui_browser__show(browser, title, help) < 0)
return -1;
sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), ab->opts->percent_type);
ui_browser__gotorc_title(browser, 0, 0);
ui_browser__set_color(browser, HE_COLORSET_ROOT);
ui_browser__write_nstring(browser, symbol_dso, browser->width + 1);
return 0;
}
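/*
 * Cycle the percent type: 'base' toggles between hits and period while
 * keeping the local/global scope, otherwise the scope toggles while the
 * hits/period base is kept.
 */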
static void
switch_percent_type(struct annotation_options *opts, bool base)
{
switch (opts->percent_type) {
case PERCENT_HITS_LOCAL:
if (base)
opts->percent_type = PERCENT_PERIOD_LOCAL;
else
opts->percent_type = PERCENT_HITS_GLOBAL;
break;
case PERCENT_HITS_GLOBAL:
if (base)
opts->percent_type = PERCENT_PERIOD_GLOBAL;
else
opts->percent_type = PERCENT_HITS_LOCAL;
break;
case PERCENT_PERIOD_LOCAL:
if (base)
opts->percent_type = PERCENT_HITS_LOCAL;
else
opts->percent_type = PERCENT_PERIOD_GLOBAL;
break;
case PERCENT_PERIOD_GLOBAL:
if (base)
opts->percent_type = PERCENT_HITS_GLOBAL;
else
opts->percent_type = PERCENT_PERIOD_LOCAL;
break;
default:
WARN_ON(1);
}
}
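/* Main event loop of the annotate browser; returns the key it exited on. */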
static int annotate_browser__run(struct annotate_browser *browser,
struct evsel *evsel,
struct hist_browser_timer *hbt)
{
struct rb_node *nd = NULL;
struct hists *hists = evsel__hists(evsel);
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(ms->sym);
const char *help = "Press 'h' for help on key bindings";
int delay_secs = hbt ? hbt->refresh : 0;
char title[256];
int key;
hists__scnprintf_title(hists, title, sizeof(title));
if (annotate_browser__show(&browser->b, title, help) < 0)
return -1;
annotate_browser__calc_percent(browser, evsel);
if (browser->curr_hot) {
annotate_browser__set_rb_top(browser, browser->curr_hot);
browser->b.navkeypressed = false;
}
nd = browser->curr_hot;
while (1) {
key = ui_browser__run(&browser->b, delay_secs);
if (delay_secs != 0) {
annotate_browser__calc_percent(browser, evsel);
/*
* Current line focus got out of the list of most active
* lines, NULL it so that if TAB|UNTAB is pressed, we
* move to curr_hot (current hottest line).
*/
if (nd != NULL && RB_EMPTY_NODE(nd))
nd = NULL;
}
switch (key) {
case K_TIMER:
if (hbt)
hbt->timer(hbt->arg);
if (delay_secs != 0) {
symbol__annotate_decay_histogram(sym, evsel->core.idx);
hists__scnprintf_title(hists, title, sizeof(title));
annotate_browser__show(&browser->b, title, help);
}
continue;
case K_TAB:
if (nd != NULL) {
nd = rb_prev(nd);
if (nd == NULL)
nd = rb_last(&browser->entries);
} else
nd = browser->curr_hot;
break;
case K_UNTAB:
if (nd != NULL) {
nd = rb_next(nd);
if (nd == NULL)
nd = rb_first(&browser->entries);
} else
nd = browser->curr_hot;
break;
case K_F1:
case 'h':
ui_browser__help_window(&browser->b,
"UP/DOWN/PGUP\n"
"PGDN/SPACE Navigate\n"
"</> Move to prev/next symbol\n"
"q/ESC/CTRL+C Exit\n\n"
"ENTER Go to target\n"
"H Go to hottest instruction\n"
"TAB/shift+TAB Cycle thru hottest instructions\n"
"j Toggle showing jump to target arrows\n"
"J Toggle showing number of jump sources on targets\n"
"n Search next string\n"
"o Toggle disassembler output/simplified view\n"
"O Bump offset level (jump targets -> +call -> all -> cycle thru)\n"
"s Toggle source code view\n"
"t Circulate percent, total period, samples view\n"
"c Show min/max cycle\n"
"/ Search string\n"
"k Toggle line numbers\n"
"l Show full source file location\n"
"P Print to [symbol_name].annotation file.\n"
"r Run available scripts\n"
"p Toggle percent type [local/global]\n"
"b Toggle percent base [period/hits]\n"
"? Search string backwards\n"
"f Toggle showing offsets to full address\n");
continue;
case 'r':
script_browse(NULL, NULL);
annotate_browser__show(&browser->b, title, help);
continue;
case 'k':
notes->options->show_linenr = !notes->options->show_linenr;
continue;
case 'l':
			annotate_browser__show_full_location(&browser->b);
continue;
case 'H':
nd = browser->curr_hot;
break;
case 's':
if (annotate_browser__toggle_source(browser))
ui_helpline__puts(help);
continue;
case 'o':
notes->options->use_offset = !notes->options->use_offset;
annotation__update_column_widths(notes);
continue;
case 'O':
if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
continue;
case 'j':
notes->options->jump_arrows = !notes->options->jump_arrows;
continue;
case 'J':
notes->options->show_nr_jumps = !notes->options->show_nr_jumps;
annotation__update_column_widths(notes);
continue;
case '/':
if (annotate_browser__search(browser, delay_secs)) {
show_help:
ui_helpline__puts(help);
}
continue;
case 'n':
if (browser->searching_backwards ?
annotate_browser__continue_search_reverse(browser, delay_secs) :
annotate_browser__continue_search(browser, delay_secs))
goto show_help;
continue;
case '?':
if (annotate_browser__search_reverse(browser, delay_secs))
goto show_help;
continue;
case 'D': {
static int seq;
ui_helpline__pop();
ui_helpline__fpush("%d: nr_ent=%d, height=%d, idx=%d, top_idx=%d, nr_asm_entries=%d",
seq++, browser->b.nr_entries,
browser->b.height,
browser->b.index,
browser->b.top_idx,
notes->nr_asm_entries);
}
continue;
case K_ENTER:
case K_RIGHT:
{
struct disasm_line *dl = disasm_line(browser->selection);
if (browser->selection == NULL)
ui_helpline__puts("Huh? No selection. Report to [email protected]");
else if (browser->selection->offset == -1)
ui_helpline__puts("Actions are only available for assembly lines.");
else if (!dl->ins.ops)
goto show_sup_ins;
else if (ins__is_ret(&dl->ins))
goto out;
else if (!(annotate_browser__jump(browser, evsel, hbt) ||
annotate_browser__callq(browser, evsel, hbt))) {
show_sup_ins:
ui_helpline__puts("Actions are only available for function call/return & jump/branch instructions.");
}
continue;
}
case 'P':
map_symbol__annotation_dump(ms, evsel, browser->opts);
continue;
case 't':
if (symbol_conf.show_total_period) {
symbol_conf.show_total_period = false;
symbol_conf.show_nr_samples = true;
} else if (symbol_conf.show_nr_samples)
symbol_conf.show_nr_samples = false;
else
symbol_conf.show_total_period = true;
annotation__update_column_widths(notes);
continue;
case 'c':
			notes->options->show_minmax_cycle = !notes->options->show_minmax_cycle;
annotation__update_column_widths(notes);
continue;
case 'p':
case 'b':
switch_percent_type(browser->opts, key == 'b');
hists__scnprintf_title(hists, title, sizeof(title));
annotate_browser__show(&browser->b, title, help);
continue;
case 'f':
annotation__toggle_full_addr(notes, ms);
continue;
case K_LEFT:
case '<':
case '>':
case K_ESC:
case 'q':
case CTRL('c'):
goto out;
default:
continue;
}
if (nd != NULL)
annotate_browser__set_rb_top(browser, nd);
}
out:
ui_browser__hide(&browser->b);
return key;
}
int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
struct hist_browser_timer *hbt,
struct annotation_options *opts)
{
return symbol__tui_annotate(ms, evsel, hbt, opts);
}
int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
struct hist_browser_timer *hbt,
struct annotation_options *opts)
{
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
return map_symbol__tui_annotate(&he->ms, evsel, hbt, opts);
}
int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
struct hist_browser_timer *hbt,
struct annotation_options *opts)
{
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct annotate_browser browser = {
.b = {
.refresh = annotate_browser__refresh,
.seek = ui_browser__list_head_seek,
.write = annotate_browser__write,
.filter = disasm_line__filter,
.extra_title_lines = 1, /* for hists__scnprintf_title() */
.priv = ms,
.use_navkeypressed = true,
},
.opts = opts,
};
struct dso *dso;
	int ret = -1, err;
	int not_annotated;
	if (sym == NULL)
		return -1;
	not_annotated = list_empty(&notes->src->source);
dso = map__dso(ms->map);
if (dso->annotate_warned)
return -1;
if (not_annotated) {
err = symbol__annotate2(ms, evsel, opts, &browser.arch);
if (err) {
char msg[BUFSIZ];
dso->annotate_warned = true;
symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
goto out_free_offsets;
}
}
ui_helpline__push("Press ESC to exit");
browser.b.width = notes->max_line_len;
browser.b.nr_entries = notes->nr_entries;
	browser.b.entries = &notes->src->source;
browser.b.width += 18; /* Percentage */
if (notes->options->hide_src_code)
ui_browser__init_asm_mode(&browser.b);
ret = annotate_browser__run(&browser, evsel, hbt);
	if (not_annotated)
annotated_source__purge(notes->src);
out_free_offsets:
	if (not_annotated)
		zfree(&notes->offsets);
return ret;
}
| linux-master | tools/perf/ui/browsers/annotate.c |
// SPDX-License-Identifier: GPL-2.0
#include <elf.h>
#include <inttypes.h>
#include <sys/ttydefaults.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include "../../util/debug.h"
#include "../../util/map.h"
#include "../../util/dso.h"
#include "../../util/symbol.h"
#include "../browser.h"
#include "../helpline.h"
#include "../keysyms.h"
#include "map.h"
#include <linux/ctype.h>
struct map_browser {
struct ui_browser b;
struct map *map;
u8 addrlen;
};
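/*
 * Print one symbol row: start/end address, binding ('g'lobal, 'l'ocal,
 * 'w'eak otherwise) and the symbol name, clipped to the browser width.
 */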
static void map_browser__write(struct ui_browser *browser, void *nd, int row)
{
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
struct map_browser *mb = container_of(browser, struct map_browser, b);
bool current_entry = ui_browser__is_current_entry(browser, row);
int width;
ui_browser__set_percent_color(browser, 0, current_entry);
ui_browser__printf(browser, "%*" PRIx64 " %*" PRIx64 " %c ",
mb->addrlen, sym->start, mb->addrlen, sym->end,
sym->binding == STB_GLOBAL ? 'g' :
sym->binding == STB_LOCAL ? 'l' : 'w');
width = browser->width - ((mb->addrlen * 2) + 4);
if (width > 0)
ui_browser__write_nstring(browser, sym->name, width);
}
/* FIXME uber-kludgy, see comment on cmd_report... */
static u32 *symbol__browser_index(struct symbol *browser)
{
return ((void *)browser) - sizeof(struct rb_node) - sizeof(u32);
}
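/*
 * Prompt for a symbol name, or an address when prefixed with "0x", and
 * scroll the browser to the matching symbol if one is found.
 */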
static int map_browser__search(struct map_browser *browser)
{
char target[512];
struct symbol *sym;
int err = ui_browser__input_window("Search by name/addr",
"Prefix with 0x to search by address",
target, "ENTER: OK, ESC: Cancel", 0);
if (err != K_ENTER)
return -1;
if (target[0] == '0' && tolower(target[1]) == 'x') {
u64 addr = strtoull(target, NULL, 16);
sym = map__find_symbol(browser->map, addr);
} else
sym = map__find_symbol_by_name(browser->map, target);
if (sym != NULL) {
u32 *idx = symbol__browser_index(sym);
browser->b.top = &sym->rb_node;
browser->b.index = browser->b.top_idx = *idx;
} else
ui_helpline__fpush("%s not found!", target);
return 0;
}
static int map_browser__run(struct map_browser *browser)
{
int key;
if (ui_browser__show(&browser->b, map__dso(browser->map)->long_name,
"Press ESC to exit, %s / to search",
verbose > 0 ? "" : "restart with -v to use") < 0)
return -1;
while (1) {
key = ui_browser__run(&browser->b, 0);
switch (key) {
case '/':
if (verbose > 0)
map_browser__search(browser);
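			/* fall through */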
default:
break;
case K_LEFT:
case K_ESC:
case 'q':
case CTRL('c'):
goto out;
}
}
out:
ui_browser__hide(&browser->b);
return key;
}
int map__browse(struct map *map)
{
struct map_browser mb = {
.b = {
.entries = &map__dso(map)->symbols,
.refresh = ui_browser__rb_tree_refresh,
.seek = ui_browser__rb_tree_seek,
.write = map_browser__write,
},
.map = map,
};
struct rb_node *nd;
char tmp[BITS_PER_LONG / 4];
u64 maxaddr = 0;
for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
if (maxaddr < pos->end)
maxaddr = pos->end;
if (verbose > 0) {
u32 *idx = symbol__browser_index(pos);
*idx = mb.b.nr_entries;
}
++mb.b.nr_entries;
}
mb.addrlen = snprintf(tmp, sizeof(tmp), "%" PRIx64, maxaddr);
return map_browser__run(&mb);
}
| linux-master | tools/perf/ui/browsers/map.c |
// SPDX-License-Identifier: GPL-2.0
#include "ui/browser.h"
#include "ui/keysyms.h"
#include "ui/ui.h"
#include "ui/util.h"
#include "ui/libslang.h"
#include "util/header.h"
#include "util/session.h"
#include <sys/ttydefaults.h>
static void ui_browser__argv_write(struct ui_browser *browser,
void *entry, int row)
{
char **arg = entry;
char *str = *arg;
char empty[] = " ";
bool current_entry = ui_browser__is_current_entry(browser, row);
unsigned long offset = (unsigned long)browser->priv;
if (offset >= strlen(str))
str = empty;
else
str = str + offset;
ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
HE_COLORSET_NORMAL);
ui_browser__write_nstring(browser, str, browser->width);
}
static int list_menu__run(struct ui_browser *menu)
{
int key;
unsigned long offset;
static const char help[] =
"h/?/F1 Show this window\n"
"UP/DOWN/PGUP\n"
"PGDN/SPACE\n"
"LEFT/RIGHT Navigate\n"
"q/ESC/CTRL+C Exit browser";
if (ui_browser__show(menu, "Header information", "Press 'q' to exit") < 0)
return -1;
while (1) {
key = ui_browser__run(menu, 0);
switch (key) {
case K_RIGHT:
offset = (unsigned long)menu->priv;
offset += 10;
menu->priv = (void *)offset;
continue;
case K_LEFT:
offset = (unsigned long)menu->priv;
if (offset >= 10)
offset -= 10;
menu->priv = (void *)offset;
continue;
case K_F1:
case 'h':
case '?':
ui_browser__help_window(menu, help);
continue;
case K_ESC:
case 'q':
case CTRL('c'):
key = -1;
break;
default:
continue;
}
break;
}
ui_browser__hide(menu);
return key;
}
static int ui__list_menu(int argc, char * const argv[])
{
struct ui_browser menu = {
.entries = (void *)argv,
.refresh = ui_browser__argv_refresh,
.seek = ui_browser__argv_seek,
.write = ui_browser__argv_write,
.nr_entries = argc,
};
return list_menu__run(&menu);
}
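/*
 * Dump the perf.data header information into a memory stream, split it
 * into one argv[] entry per line and show it in a list browser.
 */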
int tui__header_window(struct perf_env *env)
{
int i, argc = 0;
char **argv;
struct perf_session *session;
char *ptr, *pos;
size_t size;
FILE *fp = open_memstream(&ptr, &size);
session = container_of(env, struct perf_session, header.env);
perf_header__fprintf_info(session, fp, true);
fclose(fp);
for (pos = ptr, argc = 0; (pos = strchr(pos, '\n')) != NULL; pos++)
argc++;
argv = calloc(argc + 1, sizeof(*argv));
if (argv == NULL)
goto out;
argv[0] = pos = ptr;
for (i = 1; (pos = strchr(pos, '\n')) != NULL; i++) {
*pos++ = '\0';
argv[i] = pos;
}
BUG_ON(i != argc + 1);
ui__list_menu(argc, argv);
out:
free(argv);
free(ptr);
return 0;
}
| linux-master | tools/perf/ui/browsers/header.c |
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <sys/ttydefaults.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include "../../util/debug.h"
#include "../../util/dso.h"
#include "../../util/callchain.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
#include "../../util/header.h"
#include "../../util/hist.h"
#include "../../util/machine.h"
#include "../../util/map.h"
#include "../../util/maps.h"
#include "../../util/symbol.h"
#include "../../util/map_symbol.h"
#include "../../util/branch.h"
#include "../../util/pstack.h"
#include "../../util/sort.h"
#include "../../util/top.h"
#include "../../util/thread.h"
#include "../../util/block-info.h"
#include "../../util/util.h"
#include "../../arch/common.h"
#include "../browsers/hists.h"
#include "../helpline.h"
#include "../util.h"
#include "../ui.h"
#include "map.h"
#include "annotate.h"
#include "srcline.h"
#include "string2.h"
#include "units.h"
#include "time-utils.h"
#include <linux/ctype.h>
extern void hist_browser__init_hpp(void);
static int hists_browser__scnprintf_title(struct hist_browser *browser, char *bf, size_t size);
static void hist_browser__update_nr_entries(struct hist_browser *hb);
static struct rb_node *hists__filter_entries(struct rb_node *nd,
float min_pcnt);
static bool hist_browser__has_filter(struct hist_browser *hb)
{
return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter || hb->c2c_filter;
}
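/*
 * Count the callchain rows contributed by unfolded leaf entries, i.e.
 * the extra lines displayed besides the hist entries themselves.
 */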
static int hist_browser__get_folding(struct hist_browser *browser)
{
struct rb_node *nd;
struct hists *hists = browser->hists;
int unfolded_rows = 0;
for (nd = rb_first_cached(&hists->entries);
(nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL;
nd = rb_hierarchy_next(nd)) {
struct hist_entry *he =
rb_entry(nd, struct hist_entry, rb_node);
if (he->leaf && he->unfolded)
unfolded_rows += he->nr_rows;
}
return unfolded_rows;
}
static void hist_browser__set_title_space(struct hist_browser *hb)
{
struct ui_browser *browser = &hb->b;
struct hists *hists = hb->hists;
struct perf_hpp_list *hpp_list = hists->hpp_list;
browser->extra_title_lines = hb->show_headers ? hpp_list->nr_header_lines : 0;
}
static u32 hist_browser__nr_entries(struct hist_browser *hb)
{
u32 nr_entries;
if (symbol_conf.report_hierarchy)
nr_entries = hb->nr_hierarchy_entries;
else if (hist_browser__has_filter(hb))
nr_entries = hb->nr_non_filtered_entries;
else
nr_entries = hb->hists->nr_entries;
hb->nr_callchain_rows = hist_browser__get_folding(hb);
return nr_entries + hb->nr_callchain_rows;
}
static void hist_browser__update_rows(struct hist_browser *hb)
{
struct ui_browser *browser = &hb->b;
struct hists *hists = hb->hists;
struct perf_hpp_list *hpp_list = hists->hpp_list;
u16 index_row;
if (!hb->show_headers) {
browser->rows += browser->extra_title_lines;
browser->extra_title_lines = 0;
return;
}
browser->extra_title_lines = hpp_list->nr_header_lines;
browser->rows -= browser->extra_title_lines;
/*
* Verify if we were at the last line and that line isn't
* visible because we now show the header line(s).
*/
index_row = browser->index - browser->top_idx;
if (index_row >= browser->rows)
browser->index -= index_row - browser->rows + 1;
}
static void hist_browser__refresh_dimensions(struct ui_browser *browser)
{
struct hist_browser *hb = container_of(browser, struct hist_browser, b);
/* 3 == +/- toggle symbol before actual hist_entry rendering */
browser->width = 3 + (hists__sort_list_width(hb->hists) + sizeof("[k]"));
/*
* FIXME: Just keeping existing behaviour, but this really should be
* before updating browser->width, as it will invalidate the
* calculation above. Fix this and the fallout in another
* changeset.
*/
ui_browser__refresh_dimensions(browser);
}
static void hist_browser__reset(struct hist_browser *browser)
{
/*
* The hists__remove_entry_filter() already folds non-filtered
* entries so we can assume it has 0 callchain rows.
*/
browser->nr_callchain_rows = 0;
hist_browser__update_nr_entries(browser);
browser->b.nr_entries = hist_browser__nr_entries(browser);
hist_browser__refresh_dimensions(&browser->b);
ui_browser__reset_index(&browser->b);
}
static char tree__folded_sign(bool unfolded)
{
return unfolded ? '-' : '+';
}
static char hist_entry__folded(const struct hist_entry *he)
{
return he->has_children ? tree__folded_sign(he->unfolded) : ' ';
}
static char callchain_list__folded(const struct callchain_list *cl)
{
return cl->has_children ? tree__folded_sign(cl->unfolded) : ' ';
}
static void callchain_list__set_folding(struct callchain_list *cl, bool unfold)
{
cl->unfolded = unfold ? cl->has_children : false;
}
static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
{
int n = 0;
struct rb_node *nd;
for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
struct callchain_list *chain;
char folded_sign = ' '; /* No children */
list_for_each_entry(chain, &child->val, list) {
++n;
/* We need this because we may not have children */
folded_sign = callchain_list__folded(chain);
if (folded_sign == '+')
break;
}
if (folded_sign == '-') /* Have children and they're unfolded */
n += callchain_node__count_rows_rb_tree(child);
}
return n;
}
static int callchain_node__count_flat_rows(struct callchain_node *node)
{
struct callchain_list *chain;
char folded_sign = 0;
int n = 0;
list_for_each_entry(chain, &node->parent_val, list) {
if (!folded_sign) {
/* only check first chain list entry */
folded_sign = callchain_list__folded(chain);
if (folded_sign == '+')
return 1;
}
n++;
}
list_for_each_entry(chain, &node->val, list) {
if (!folded_sign) {
/* node->parent_val list might be empty */
folded_sign = callchain_list__folded(chain);
if (folded_sign == '+')
return 1;
}
n++;
}
return n;
}
static int callchain_node__count_folded_rows(struct callchain_node *node __maybe_unused)
{
return 1;
}
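/*
 * Rows needed to display this callchain node, depending on the current
 * callchain mode (flat, folded or graph).
 */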
static int callchain_node__count_rows(struct callchain_node *node)
{
struct callchain_list *chain;
bool unfolded = false;
int n = 0;
if (callchain_param.mode == CHAIN_FLAT)
return callchain_node__count_flat_rows(node);
else if (callchain_param.mode == CHAIN_FOLDED)
return callchain_node__count_folded_rows(node);
list_for_each_entry(chain, &node->val, list) {
++n;
unfolded = chain->unfolded;
}
if (unfolded)
n += callchain_node__count_rows_rb_tree(node);
return n;
}
static int callchain__count_rows(struct rb_root *chain)
{
struct rb_node *nd;
int n = 0;
for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
n += callchain_node__count_rows(node);
}
return n;
}
static int hierarchy_count_rows(struct hist_browser *hb, struct hist_entry *he,
bool include_children)
{
int count = 0;
struct rb_node *node;
struct hist_entry *child;
if (he->leaf)
return callchain__count_rows(&he->sorted_chain);
if (he->has_no_entry)
return 1;
node = rb_first_cached(&he->hroot_out);
while (node) {
float percent;
child = rb_entry(node, struct hist_entry, rb_node);
percent = hist_entry__get_percent_limit(child);
if (!child->filtered && percent >= hb->min_pcnt) {
count++;
if (include_children && child->unfolded)
count += hierarchy_count_rows(hb, child, true);
}
node = rb_next(node);
}
return count;
}
static bool hist_entry__toggle_fold(struct hist_entry *he)
{
if (!he)
return false;
if (!he->has_children)
return false;
he->unfolded = !he->unfolded;
return true;
}
static bool callchain_list__toggle_fold(struct callchain_list *cl)
{
if (!cl)
return false;
if (!cl->has_children)
return false;
cl->unfolded = !cl->unfolded;
return true;
}
static void callchain_node__init_have_children_rb_tree(struct callchain_node *node)
{
	struct rb_node *nd;
for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
struct callchain_list *chain;
bool first = true;
list_for_each_entry(chain, &child->val, list) {
if (first) {
first = false;
chain->has_children = chain->list.next != &child->val ||
!RB_EMPTY_ROOT(&child->rb_root);
} else
chain->has_children = chain->list.next == &child->val &&
!RB_EMPTY_ROOT(&child->rb_root);
}
callchain_node__init_have_children_rb_tree(child);
}
}
static void callchain_node__init_have_children(struct callchain_node *node,
bool has_sibling)
{
struct callchain_list *chain;
chain = list_entry(node->val.next, struct callchain_list, list);
chain->has_children = has_sibling;
if (!list_empty(&node->val)) {
chain = list_entry(node->val.prev, struct callchain_list, list);
chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
}
callchain_node__init_have_children_rb_tree(node);
}
static void callchain__init_have_children(struct rb_root *root)
{
struct rb_node *nd = rb_first(root);
bool has_sibling = nd && rb_next(nd);
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
callchain_node__init_have_children(node, has_sibling);
if (callchain_param.mode == CHAIN_FLAT ||
callchain_param.mode == CHAIN_FOLDED)
callchain_node__make_parent_list(node);
}
}
static void hist_entry__init_have_children(struct hist_entry *he)
{
if (he->init_have_children)
return;
if (he->leaf) {
he->has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
callchain__init_have_children(&he->sorted_chain);
} else {
he->has_children = !RB_EMPTY_ROOT(&he->hroot_out.rb_root);
}
he->init_have_children = true;
}
static bool hist_browser__selection_has_children(struct hist_browser *browser)
{
struct hist_entry *he = browser->he_selection;
struct map_symbol *ms = browser->selection;
if (!he || !ms)
return false;
if (ms == &he->ms)
return he->has_children;
return container_of(ms, struct callchain_list, ms)->has_children;
}
static bool hist_browser__selection_unfolded(struct hist_browser *browser)
{
struct hist_entry *he = browser->he_selection;
struct map_symbol *ms = browser->selection;
if (!he || !ms)
return false;
if (ms == &he->ms)
return he->unfolded;
return container_of(ms, struct callchain_list, ms)->unfolded;
}
static char *hist_browser__selection_sym_name(struct hist_browser *browser, char *bf, size_t size)
{
struct hist_entry *he = browser->he_selection;
struct map_symbol *ms = browser->selection;
struct callchain_list *callchain_entry;
if (!he || !ms)
return NULL;
if (ms == &he->ms) {
hist_entry__sym_snprintf(he, bf, size, 0);
return bf + 4; // skip the level, e.g. '[k] '
}
callchain_entry = container_of(ms, struct callchain_list, ms);
return callchain_list__sym_name(callchain_entry, bf, size, browser->show_dso);
}
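/*
 * Toggle the fold state of the current selection, either a hist entry
 * or one of its callchain lists, and adjust the browser's row/entry
 * accounting. Returns false if the selection has no children.
 */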
static bool hist_browser__toggle_fold(struct hist_browser *browser)
{
struct hist_entry *he = browser->he_selection;
struct map_symbol *ms = browser->selection;
struct callchain_list *cl = container_of(ms, struct callchain_list, ms);
bool has_children;
if (!he || !ms)
return false;
if (ms == &he->ms)
has_children = hist_entry__toggle_fold(he);
else
has_children = callchain_list__toggle_fold(cl);
if (has_children) {
int child_rows = 0;
hist_entry__init_have_children(he);
browser->b.nr_entries -= he->nr_rows;
if (he->leaf)
browser->nr_callchain_rows -= he->nr_rows;
else
browser->nr_hierarchy_entries -= he->nr_rows;
if (symbol_conf.report_hierarchy)
child_rows = hierarchy_count_rows(browser, he, true);
if (he->unfolded) {
if (he->leaf)
he->nr_rows = callchain__count_rows(
&he->sorted_chain);
else
he->nr_rows = hierarchy_count_rows(browser, he, false);
/* account grand children */
if (symbol_conf.report_hierarchy)
browser->b.nr_entries += child_rows - he->nr_rows;
if (!he->leaf && he->nr_rows == 0) {
he->has_no_entry = true;
he->nr_rows = 1;
}
} else {
if (symbol_conf.report_hierarchy)
browser->b.nr_entries -= child_rows - he->nr_rows;
if (he->has_no_entry)
he->has_no_entry = false;
he->nr_rows = 0;
}
browser->b.nr_entries += he->nr_rows;
if (he->leaf)
browser->nr_callchain_rows += he->nr_rows;
else
browser->nr_hierarchy_entries += he->nr_rows;
return true;
}
/* If it doesn't have children, no toggling performed */
return false;
}
static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool unfold)
{
int n = 0;
struct rb_node *nd;
for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
struct callchain_list *chain;
bool has_children = false;
list_for_each_entry(chain, &child->val, list) {
++n;
callchain_list__set_folding(chain, unfold);
has_children = chain->has_children;
}
if (has_children)
n += callchain_node__set_folding_rb_tree(child, unfold);
}
return n;
}
static int callchain_node__set_folding(struct callchain_node *node, bool unfold)
{
struct callchain_list *chain;
bool has_children = false;
int n = 0;
list_for_each_entry(chain, &node->val, list) {
++n;
callchain_list__set_folding(chain, unfold);
has_children = chain->has_children;
}
if (has_children)
n += callchain_node__set_folding_rb_tree(node, unfold);
return n;
}
static int callchain__set_folding(struct rb_root *chain, bool unfold)
{
struct rb_node *nd;
int n = 0;
for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
n += callchain_node__set_folding(node, unfold);
}
return n;
}
static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
bool unfold __maybe_unused)
{
float percent;
struct rb_node *nd;
struct hist_entry *child;
int n = 0;
for (nd = rb_first_cached(&he->hroot_out); nd; nd = rb_next(nd)) {
child = rb_entry(nd, struct hist_entry, rb_node);
percent = hist_entry__get_percent_limit(child);
if (!child->filtered && percent >= hb->min_pcnt)
n++;
}
return n;
}
static void hist_entry__set_folding(struct hist_entry *he,
struct hist_browser *hb, bool unfold)
{
hist_entry__init_have_children(he);
he->unfolded = unfold ? he->has_children : false;
if (he->has_children) {
int n;
if (he->leaf)
n = callchain__set_folding(&he->sorted_chain, unfold);
else
n = hierarchy_set_folding(hb, he, unfold);
he->nr_rows = unfold ? n : 0;
} else
he->nr_rows = 0;
}
static void
__hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
struct rb_node *nd;
struct hist_entry *he;
double percent;
nd = rb_first_cached(&browser->hists->entries);
while (nd) {
he = rb_entry(nd, struct hist_entry, rb_node);
/* set folding state even if it's currently folded */
nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
hist_entry__set_folding(he, browser, unfold);
percent = hist_entry__get_percent_limit(he);
if (he->filtered || percent < browser->min_pcnt)
continue;
if (!he->depth || unfold)
browser->nr_hierarchy_entries++;
if (he->leaf)
browser->nr_callchain_rows += he->nr_rows;
else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
browser->nr_hierarchy_entries++;
he->has_no_entry = true;
he->nr_rows = 1;
} else
he->has_no_entry = false;
}
}
static void hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
browser->nr_hierarchy_entries = 0;
browser->nr_callchain_rows = 0;
__hist_browser__set_folding(browser, unfold);
browser->b.nr_entries = hist_browser__nr_entries(browser);
/* Go to the start, we may be way after valid entries after a collapse */
ui_browser__reset_index(&browser->b);
}
static void hist_browser__set_folding_selected(struct hist_browser *browser, bool unfold)
{
if (!browser->he_selection)
return;
if (unfold == browser->he_selection->unfolded)
return;
hist_browser__toggle_fold(browser);
}
static void ui_browser__warn_lost_events(struct ui_browser *browser)
{
ui_browser__warning(browser, 4,
"Events are being lost, check IO/CPU overload!\n\n"
"You may want to run 'perf' using a RT scheduler policy:\n\n"
" perf top -r 80\n\n"
"Or reduce the sampling frequency.");
}
static int hist_browser__title(struct hist_browser *browser, char *bf, size_t size)
{
return browser->title ? browser->title(browser, bf, size) : 0;
}
static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_lost_event, char *title, size_t size, int key)
{
switch (key) {
case K_TIMER: {
struct hist_browser_timer *hbt = browser->hbt;
struct evsel *evsel = hists_to_evsel(browser->hists);
u64 nr_entries;
WARN_ON_ONCE(!hbt);
if (hbt)
hbt->timer(hbt->arg);
if (hist_browser__has_filter(browser) || symbol_conf.report_hierarchy)
hist_browser__update_nr_entries(browser);
nr_entries = hist_browser__nr_entries(browser);
ui_browser__update_nr_entries(&browser->b, nr_entries);
if (warn_lost_event &&
(evsel->evlist->stats.nr_lost_warned !=
evsel->evlist->stats.nr_events[PERF_RECORD_LOST])) {
evsel->evlist->stats.nr_lost_warned =
evsel->evlist->stats.nr_events[PERF_RECORD_LOST];
ui_browser__warn_lost_events(&browser->b);
}
hist_browser__title(browser, title, size);
ui_browser__show_title(&browser->b, title);
break;
}
case 'D': { /* Debug */
struct hist_entry *h = rb_entry(browser->b.top, struct hist_entry, rb_node);
static int seq;
ui_helpline__pop();
ui_helpline__fpush("%d: nr_ent=(%d,%d), etl: %d, rows=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
seq++, browser->b.nr_entries, browser->hists->nr_entries,
browser->b.extra_title_lines, browser->b.rows,
browser->b.index, browser->b.top_idx, h->row_offset, h->nr_rows);
}
break;
case 'C':
/* Collapse the whole world. */
hist_browser__set_folding(browser, false);
break;
case 'c':
/* Collapse the selected entry. */
hist_browser__set_folding_selected(browser, false);
break;
case 'E':
/* Expand the whole world. */
hist_browser__set_folding(browser, true);
break;
case 'e':
/* Toggle expand/collapse the selected entry. */
hist_browser__toggle_fold(browser);
break;
case 'H':
browser->show_headers = !browser->show_headers;
hist_browser__update_rows(browser);
break;
case '+':
if (hist_browser__toggle_fold(browser))
break;
/* fall thru */
default:
return -1;
}
return 0;
}
int hist_browser__run(struct hist_browser *browser, const char *help,
bool warn_lost_event, int key)
{
char title[160];
struct hist_browser_timer *hbt = browser->hbt;
int delay_secs = hbt ? hbt->refresh : 0;
browser->b.entries = &browser->hists->entries;
browser->b.nr_entries = hist_browser__nr_entries(browser);
hist_browser__title(browser, title, sizeof(title));
if (ui_browser__show(&browser->b, title, "%s", help) < 0)
return -1;
if (key && hist_browser__handle_hotkey(browser, warn_lost_event, title, sizeof(title), key))
goto out;
while (1) {
key = ui_browser__run(&browser->b, delay_secs);
if (hist_browser__handle_hotkey(browser, warn_lost_event, title, sizeof(title), key))
break;
}
out:
ui_browser__hide(&browser->b);
return key;
}
struct callchain_print_arg {
/* for hists browser */
off_t row_offset;
bool is_current_entry;
/* for file dump */
FILE *fp;
int printed;
};
typedef void (*print_callchain_entry_fn)(struct hist_browser *browser,
struct callchain_list *chain,
const char *str, int offset,
unsigned short row,
struct callchain_print_arg *arg);
static void hist_browser__show_callchain_entry(struct hist_browser *browser,
struct callchain_list *chain,
const char *str, int offset,
unsigned short row,
struct callchain_print_arg *arg)
{
int color, width;
char folded_sign = callchain_list__folded(chain);
bool show_annotated = browser->show_dso && chain->ms.sym && symbol__annotation(chain->ms.sym)->src;
color = HE_COLORSET_NORMAL;
width = browser->b.width - (offset + 2);
if (ui_browser__is_current_entry(&browser->b, row)) {
browser->selection = &chain->ms;
color = HE_COLORSET_SELECTED;
arg->is_current_entry = true;
}
ui_browser__set_color(&browser->b, color);
ui_browser__gotorc(&browser->b, row, 0);
ui_browser__write_nstring(&browser->b, " ", offset);
ui_browser__printf(&browser->b, "%c", folded_sign);
ui_browser__write_graph(&browser->b, show_annotated ? SLSMG_RARROW_CHAR : ' ');
ui_browser__write_nstring(&browser->b, str, width);
}
static void hist_browser__fprintf_callchain_entry(struct hist_browser *b __maybe_unused,
struct callchain_list *chain,
const char *str, int offset,
unsigned short row __maybe_unused,
struct callchain_print_arg *arg)
{
char folded_sign = callchain_list__folded(chain);
arg->printed += fprintf(arg->fp, "%*s%c %s\n", offset, " ",
folded_sign, str);
}
typedef bool (*check_output_full_fn)(struct hist_browser *browser,
unsigned short row);
static bool hist_browser__check_output_full(struct hist_browser *browser,
unsigned short row)
{
return browser->b.rows == row;
}
static bool hist_browser__check_dump_full(struct hist_browser *browser __maybe_unused,
unsigned short row __maybe_unused)
{
return false;
}
#define LEVEL_OFFSET_STEP 3
static int hist_browser__show_callchain_list(struct hist_browser *browser,
struct callchain_node *node,
struct callchain_list *chain,
unsigned short row, u64 total,
bool need_percent, int offset,
print_callchain_entry_fn print,
struct callchain_print_arg *arg)
{
char bf[1024], *alloc_str;
char buf[64], *alloc_str2;
const char *str;
int ret = 1;
if (arg->row_offset != 0) {
arg->row_offset--;
return 0;
}
alloc_str = NULL;
alloc_str2 = NULL;
str = callchain_list__sym_name(chain, bf, sizeof(bf),
browser->show_dso);
if (symbol_conf.show_branchflag_count) {
callchain_list_counts__printf_value(chain, NULL,
buf, sizeof(buf));
if (asprintf(&alloc_str2, "%s%s", str, buf) < 0)
str = "Not enough memory!";
else
str = alloc_str2;
}
if (need_percent) {
callchain_node__scnprintf_value(node, buf, sizeof(buf),
total);
if (asprintf(&alloc_str, "%s %s", buf, str) < 0)
str = "Not enough memory!";
else
str = alloc_str;
}
print(browser, chain, str, offset, row, arg);
free(alloc_str);
free(alloc_str2);
return ret;
}
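/*
 * Percentages are redundant when there is a single child covering all
 * of the parent's hits, so only display them otherwise.
 */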
static bool check_percent_display(struct rb_node *node, u64 parent_total)
{
struct callchain_node *child;
if (node == NULL)
return false;
if (rb_next(node))
return true;
child = rb_entry(node, struct callchain_node, rb_node);
return callchain_cumul_hits(child) != parent_total;
}
static int hist_browser__show_callchain_flat(struct hist_browser *browser,
struct rb_root *root,
unsigned short row, u64 total,
u64 parent_total,
print_callchain_entry_fn print,
struct callchain_print_arg *arg,
check_output_full_fn is_output_full)
{
struct rb_node *node;
int first_row = row, offset = LEVEL_OFFSET_STEP;
bool need_percent;
node = rb_first(root);
need_percent = check_percent_display(node, parent_total);
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
struct rb_node *next = rb_next(node);
struct callchain_list *chain;
char folded_sign = ' ';
int first = true;
int extra_offset = 0;
list_for_each_entry(chain, &child->parent_val, list) {
bool was_first = first;
if (first)
first = false;
else if (need_percent)
extra_offset = LEVEL_OFFSET_STEP;
folded_sign = callchain_list__folded(chain);
row += hist_browser__show_callchain_list(browser, child,
chain, row, total,
was_first && need_percent,
offset + extra_offset,
print, arg);
if (is_output_full(browser, row))
goto out;
if (folded_sign == '+')
goto next;
}
list_for_each_entry(chain, &child->val, list) {
bool was_first = first;
if (first)
first = false;
else if (need_percent)
extra_offset = LEVEL_OFFSET_STEP;
folded_sign = callchain_list__folded(chain);
row += hist_browser__show_callchain_list(browser, child,
chain, row, total,
was_first && need_percent,
offset + extra_offset,
print, arg);
if (is_output_full(browser, row))
goto out;
if (folded_sign == '+')
break;
}
next:
if (is_output_full(browser, row))
break;
node = next;
}
out:
return row - first_row;
}
static char *hist_browser__folded_callchain_str(struct hist_browser *browser,
struct callchain_list *chain,
char *value_str, char *old_str)
{
char bf[1024];
const char *str;
char *new;
str = callchain_list__sym_name(chain, bf, sizeof(bf),
browser->show_dso);
if (old_str) {
if (asprintf(&new, "%s%s%s", old_str,
symbol_conf.field_sep ?: ";", str) < 0)
new = NULL;
} else {
if (value_str) {
if (asprintf(&new, "%s %s", value_str, str) < 0)
new = NULL;
} else {
if (asprintf(&new, "%s", str) < 0)
new = NULL;
}
}
return new;
}
static int hist_browser__show_callchain_folded(struct hist_browser *browser,
struct rb_root *root,
unsigned short row, u64 total,
u64 parent_total,
print_callchain_entry_fn print,
struct callchain_print_arg *arg,
check_output_full_fn is_output_full)
{
struct rb_node *node;
int first_row = row, offset = LEVEL_OFFSET_STEP;
bool need_percent;
node = rb_first(root);
need_percent = check_percent_display(node, parent_total);
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
struct rb_node *next = rb_next(node);
struct callchain_list *chain, *first_chain = NULL;
int first = true;
char *value_str = NULL, *value_str_alloc = NULL;
char *chain_str = NULL, *chain_str_alloc = NULL;
if (arg->row_offset != 0) {
arg->row_offset--;
goto next;
}
if (need_percent) {
char buf[64];
callchain_node__scnprintf_value(child, buf, sizeof(buf), total);
if (asprintf(&value_str, "%s", buf) < 0) {
value_str = (char *)"<...>";
goto do_print;
}
value_str_alloc = value_str;
}
list_for_each_entry(chain, &child->parent_val, list) {
chain_str = hist_browser__folded_callchain_str(browser,
chain, value_str, chain_str);
if (first) {
first = false;
first_chain = chain;
}
if (chain_str == NULL) {
chain_str = (char *)"Not enough memory!";
goto do_print;
}
chain_str_alloc = chain_str;
}
list_for_each_entry(chain, &child->val, list) {
chain_str = hist_browser__folded_callchain_str(browser,
chain, value_str, chain_str);
if (first) {
first = false;
first_chain = chain;
}
if (chain_str == NULL) {
chain_str = (char *)"Not enough memory!";
goto do_print;
}
chain_str_alloc = chain_str;
}
do_print:
print(browser, first_chain, chain_str, offset, row++, arg);
free(value_str_alloc);
free(chain_str_alloc);
next:
if (is_output_full(browser, row))
break;
node = next;
}
return row - first_row;
}
static int hist_browser__show_callchain_graph(struct hist_browser *browser,
struct rb_root *root, int level,
unsigned short row, u64 total,
u64 parent_total,
print_callchain_entry_fn print,
struct callchain_print_arg *arg,
check_output_full_fn is_output_full)
{
struct rb_node *node;
int first_row = row, offset = level * LEVEL_OFFSET_STEP;
bool need_percent;
u64 percent_total = total;
if (callchain_param.mode == CHAIN_GRAPH_REL)
percent_total = parent_total;
node = rb_first(root);
need_percent = check_percent_display(node, parent_total);
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
struct rb_node *next = rb_next(node);
struct callchain_list *chain;
char folded_sign = ' ';
int first = true;
int extra_offset = 0;
list_for_each_entry(chain, &child->val, list) {
bool was_first = first;
if (first)
first = false;
else if (need_percent)
extra_offset = LEVEL_OFFSET_STEP;
folded_sign = callchain_list__folded(chain);
row += hist_browser__show_callchain_list(browser, child,
chain, row, percent_total,
was_first && need_percent,
offset + extra_offset,
print, arg);
if (is_output_full(browser, row))
goto out;
if (folded_sign == '+')
break;
}
if (folded_sign == '-') {
const int new_level = level + (extra_offset ? 2 : 1);
row += hist_browser__show_callchain_graph(browser, &child->rb_root,
new_level, row, total,
child->children_hit,
print, arg, is_output_full);
}
if (is_output_full(browser, row))
break;
node = next;
}
out:
return row - first_row;
}
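/*
 * Render the callchain of a hist entry through the given print callback,
 * dispatching on the flat/folded/graph callchain mode.
 */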
static int hist_browser__show_callchain(struct hist_browser *browser,
struct hist_entry *entry, int level,
unsigned short row,
print_callchain_entry_fn print,
struct callchain_print_arg *arg,
check_output_full_fn is_output_full)
{
u64 total = hists__total_period(entry->hists);
u64 parent_total;
int printed;
if (symbol_conf.cumulate_callchain)
parent_total = entry->stat_acc->period;
else
parent_total = entry->stat.period;
if (callchain_param.mode == CHAIN_FLAT) {
printed = hist_browser__show_callchain_flat(browser,
&entry->sorted_chain, row,
total, parent_total, print, arg,
is_output_full);
} else if (callchain_param.mode == CHAIN_FOLDED) {
printed = hist_browser__show_callchain_folded(browser,
&entry->sorted_chain, row,
total, parent_total, print, arg,
is_output_full);
} else {
printed = hist_browser__show_callchain_graph(browser,
&entry->sorted_chain, level, row,
total, parent_total, print, arg,
is_output_full);
}
if (arg->is_current_entry)
browser->he_selection = entry;
return printed;
}
struct hpp_arg {
struct ui_browser *b;
char folded_sign;
bool current_entry;
};
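/*
 * printf callback for hpp__fmt(): the va_list carries a field width
 * (int) and a percentage (double), printed in the color matching the
 * percentage.
 */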
int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...)
{
struct hpp_arg *arg = hpp->ptr;
int ret, len;
va_list args;
double percent;
va_start(args, fmt);
len = va_arg(args, int);
percent = va_arg(args, double);
va_end(args);
ui_browser__set_percent_color(arg->b, percent, arg->current_entry);
ret = scnprintf(hpp->buf, hpp->size, fmt, len, percent);
ui_browser__printf(arg->b, "%s", hpp->buf);
return ret;
}
#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 __hpp_get_##_field(struct hist_entry *he) \
{ \
return he->stat._field; \
} \
\
static int \
hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
return hpp__fmt(fmt, hpp, he, __hpp_get_##_field, " %*.2f%%", \
__hpp__slsmg_color_printf, true); \
}
#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
static u64 __hpp_get_acc_##_field(struct hist_entry *he) \
{ \
return he->stat_acc->_field; \
} \
\
static int \
hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
if (!symbol_conf.cumulate_callchain) { \
struct hpp_arg *arg = hpp->ptr; \
int len = fmt->user_len ?: fmt->len; \
int ret = scnprintf(hpp->buf, hpp->size, \
"%*s", len, "N/A"); \
ui_browser__printf(arg->b, "%s", hpp->buf); \
\
return ret; \
} \
return hpp__fmt(fmt, hpp, he, __hpp_get_acc_##_field, \
" %*.2f%%", __hpp__slsmg_color_printf, true); \
}
__HPP_COLOR_PERCENT_FN(overhead, period)
__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys)
__HPP_COLOR_PERCENT_FN(overhead_us, period_us)
__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys)
__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us)
__HPP_COLOR_ACC_PERCENT_FN(overhead_acc, period)
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
void hist_browser__init_hpp(void)
{
perf_hpp__format[PERF_HPP__OVERHEAD].color =
hist_browser__hpp_color_overhead;
perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
hist_browser__hpp_color_overhead_sys;
perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
hist_browser__hpp_color_overhead_us;
perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
hist_browser__hpp_color_overhead_guest_sys;
perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
hist_browser__hpp_color_overhead_guest_us;
perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color =
hist_browser__hpp_color_overhead_acc;
res_sample_init();
}
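/*
 * Draw one hist entry line (all configured columns) at @row plus, when
 * its callchain is unfolded, the callchain rows below it. Returns the
 * number of rows printed.
 */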
static int hist_browser__show_entry(struct hist_browser *browser,
struct hist_entry *entry,
unsigned short row)
{
int printed = 0;
int width = browser->b.width;
char folded_sign = ' ';
bool current_entry = ui_browser__is_current_entry(&browser->b, row);
bool use_callchain = hist_entry__has_callchains(entry) && symbol_conf.use_callchain;
off_t row_offset = entry->row_offset;
bool first = true;
struct perf_hpp_fmt *fmt;
if (current_entry) {
browser->he_selection = entry;
browser->selection = &entry->ms;
}
if (use_callchain) {
hist_entry__init_have_children(entry);
folded_sign = hist_entry__folded(entry);
}
if (row_offset == 0) {
struct hpp_arg arg = {
.b = &browser->b,
.folded_sign = folded_sign,
.current_entry = current_entry,
};
int column = 0;
ui_browser__gotorc(&browser->b, row, 0);
hists__for_each_format(browser->hists, fmt) {
char s[2048];
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
.ptr = &arg,
};
if (perf_hpp__should_skip(fmt, entry->hists) ||
column++ < browser->b.horiz_scroll)
continue;
if (current_entry && browser->b.navkeypressed) {
ui_browser__set_color(&browser->b,
HE_COLORSET_SELECTED);
} else {
ui_browser__set_color(&browser->b,
HE_COLORSET_NORMAL);
}
if (first) {
if (use_callchain) {
ui_browser__printf(&browser->b, "%c ", folded_sign);
width -= 2;
}
first = false;
} else {
ui_browser__printf(&browser->b, " ");
width -= 2;
}
if (fmt->color) {
int ret = fmt->color(fmt, &hpp, entry);
hist_entry__snprintf_alignment(entry, &hpp, fmt, ret);
/*
* fmt->color() already used ui_browser to
* print the non alignment bits, skip it (+ret):
*/
ui_browser__printf(&browser->b, "%s", s + ret);
} else {
hist_entry__snprintf_alignment(entry, &hpp, fmt, fmt->entry(fmt, &hpp, entry));
ui_browser__printf(&browser->b, "%s", s);
}
width -= hpp.buf - s;
}
/* The scroll bar isn't being used */
if (!browser->b.navkeypressed)
width += 1;
ui_browser__write_nstring(&browser->b, "", width);
++row;
++printed;
} else
--row_offset;
if (folded_sign == '-' && row != browser->b.rows) {
struct callchain_print_arg arg = {
.row_offset = row_offset,
.is_current_entry = current_entry,
};
printed += hist_browser__show_callchain(browser,
entry, 1, row,
hist_browser__show_callchain_entry,
&arg,
hist_browser__check_output_full);
}
return printed;
}
static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
struct hist_entry *entry,
unsigned short row,
int level)
{
int printed = 0;
int width = browser->b.width;
char folded_sign = ' ';
bool current_entry = ui_browser__is_current_entry(&browser->b, row);
off_t row_offset = entry->row_offset;
bool first = true;
struct perf_hpp_fmt *fmt;
struct perf_hpp_list_node *fmt_node;
struct hpp_arg arg = {
.b = &browser->b,
.current_entry = current_entry,
};
int column = 0;
int hierarchy_indent = (entry->hists->nr_hpp_node - 2) * HIERARCHY_INDENT;
if (current_entry) {
browser->he_selection = entry;
browser->selection = &entry->ms;
}
hist_entry__init_have_children(entry);
folded_sign = hist_entry__folded(entry);
arg.folded_sign = folded_sign;
if (entry->leaf && row_offset) {
row_offset--;
goto show_callchain;
}
ui_browser__gotorc(&browser->b, row, 0);
if (current_entry && browser->b.navkeypressed)
ui_browser__set_color(&browser->b, HE_COLORSET_SELECTED);
else
ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
ui_browser__write_nstring(&browser->b, "", level * HIERARCHY_INDENT);
width -= level * HIERARCHY_INDENT;
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&entry->hists->hpp_formats,
struct perf_hpp_list_node, list);
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
char s[2048];
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
.ptr = &arg,
};
if (perf_hpp__should_skip(fmt, entry->hists) ||
column++ < browser->b.horiz_scroll)
continue;
if (current_entry && browser->b.navkeypressed) {
ui_browser__set_color(&browser->b,
HE_COLORSET_SELECTED);
} else {
ui_browser__set_color(&browser->b,
HE_COLORSET_NORMAL);
}
if (first) {
ui_browser__printf(&browser->b, "%c ", folded_sign);
width -= 2;
first = false;
} else {
ui_browser__printf(&browser->b, " ");
width -= 2;
}
if (fmt->color) {
int ret = fmt->color(fmt, &hpp, entry);
hist_entry__snprintf_alignment(entry, &hpp, fmt, ret);
/*
* fmt->color() already used ui_browser to
* print the non alignment bits, skip it (+ret):
*/
ui_browser__printf(&browser->b, "%s", s + ret);
} else {
int ret = fmt->entry(fmt, &hpp, entry);
hist_entry__snprintf_alignment(entry, &hpp, fmt, ret);
ui_browser__printf(&browser->b, "%s", s);
}
width -= hpp.buf - s;
}
if (!first) {
ui_browser__write_nstring(&browser->b, "", hierarchy_indent);
width -= hierarchy_indent;
}
if (column >= browser->b.horiz_scroll) {
char s[2048];
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
.ptr = &arg,
};
if (current_entry && browser->b.navkeypressed) {
ui_browser__set_color(&browser->b,
HE_COLORSET_SELECTED);
} else {
ui_browser__set_color(&browser->b,
HE_COLORSET_NORMAL);
}
perf_hpp_list__for_each_format(entry->hpp_list, fmt) {
if (first) {
ui_browser__printf(&browser->b, "%c ", folded_sign);
first = false;
} else {
ui_browser__write_nstring(&browser->b, "", 2);
}
width -= 2;
/*
* No need to call hist_entry__snprintf_alignment()
* since this fmt is always the last column in the
* hierarchy mode.
*/
if (fmt->color) {
width -= fmt->color(fmt, &hpp, entry);
} else {
int i = 0;
width -= fmt->entry(fmt, &hpp, entry);
ui_browser__printf(&browser->b, "%s", skip_spaces(s));
while (isspace(s[i++]))
width++;
}
}
}
/* The scroll bar isn't being used */
if (!browser->b.navkeypressed)
width += 1;
ui_browser__write_nstring(&browser->b, "", width);
++row;
++printed;
show_callchain:
if (entry->leaf && folded_sign == '-' && row != browser->b.rows) {
struct callchain_print_arg carg = {
.row_offset = row_offset,
};
printed += hist_browser__show_callchain(browser, entry,
level + 1, row,
hist_browser__show_callchain_entry, &carg,
hist_browser__check_output_full);
}
return printed;
}
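/*
 * Draw the placeholder row shown in hierarchy mode when all of an
 * entry's children fall below the percent limit.
 */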
static int hist_browser__show_no_entry(struct hist_browser *browser,
unsigned short row, int level)
{
int width = browser->b.width;
bool current_entry = ui_browser__is_current_entry(&browser->b, row);
bool first = true;
int column = 0;
int ret;
struct perf_hpp_fmt *fmt;
struct perf_hpp_list_node *fmt_node;
int indent = browser->hists->nr_hpp_node - 2;
if (current_entry) {
browser->he_selection = NULL;
browser->selection = NULL;
}
ui_browser__gotorc(&browser->b, row, 0);
if (current_entry && browser->b.navkeypressed)
ui_browser__set_color(&browser->b, HE_COLORSET_SELECTED);
else
ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
ui_browser__write_nstring(&browser->b, "", level * HIERARCHY_INDENT);
width -= level * HIERARCHY_INDENT;
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&browser->hists->hpp_formats,
struct perf_hpp_list_node, list);
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (perf_hpp__should_skip(fmt, browser->hists) ||
column++ < browser->b.horiz_scroll)
continue;
ret = fmt->width(fmt, NULL, browser->hists);
if (first) {
/* for folded sign */
first = false;
ret++;
} else {
/* space between columns */
ret += 2;
}
ui_browser__write_nstring(&browser->b, "", ret);
width -= ret;
}
ui_browser__write_nstring(&browser->b, "", indent * HIERARCHY_INDENT);
width -= indent * HIERARCHY_INDENT;
if (column >= browser->b.horiz_scroll) {
char buf[32];
ret = snprintf(buf, sizeof(buf), "no entry >= %.2f%%", browser->min_pcnt);
ui_browser__printf(&browser->b, " %s", buf);
width -= ret + 2;
}
/* The scroll bar isn't being used */
if (!browser->b.navkeypressed)
width += 1;
ui_browser__write_nstring(&browser->b, "", width);
return 1;
}
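/* Advance the hpp buffer by @inc and report whether it has been exhausted. */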
static int advance_hpp_check(struct perf_hpp *hpp, int inc)
{
advance_hpp(hpp, inc);
return hpp->size <= 0;
}
static int
hists_browser__scnprintf_headers(struct hist_browser *browser, char *buf,
size_t size, int line)
{
struct hists *hists = browser->hists;
struct perf_hpp dummy_hpp = {
.buf = buf,
.size = size,
};
struct perf_hpp_fmt *fmt;
size_t ret = 0;
int column = 0;
int span = 0;
if (hists__has_callchains(hists) && symbol_conf.use_callchain) {
ret = scnprintf(buf, size, " ");
if (advance_hpp_check(&dummy_hpp, ret))
return ret;
}
hists__for_each_format(browser->hists, fmt) {
if (perf_hpp__should_skip(fmt, hists) || column++ < browser->b.horiz_scroll)
continue;
ret = fmt->header(fmt, &dummy_hpp, hists, line, &span);
if (advance_hpp_check(&dummy_hpp, ret))
break;
if (span)
continue;
ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " ");
if (advance_hpp_check(&dummy_hpp, ret))
break;
}
return ret;
}
static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *browser, char *buf, size_t size)
{
struct hists *hists = browser->hists;
struct perf_hpp dummy_hpp = {
.buf = buf,
.size = size,
};
struct perf_hpp_fmt *fmt;
struct perf_hpp_list_node *fmt_node;
size_t ret = 0;
int column = 0;
int indent = hists->nr_hpp_node - 2;
bool first_node, first_col;
ret = scnprintf(buf, size, " ");
if (advance_hpp_check(&dummy_hpp, ret))
return ret;
first_node = true;
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&hists->hpp_formats,
struct perf_hpp_list_node, list);
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (column++ < browser->b.horiz_scroll)
continue;
ret = fmt->header(fmt, &dummy_hpp, hists, 0, NULL);
if (advance_hpp_check(&dummy_hpp, ret))
break;
ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " ");
if (advance_hpp_check(&dummy_hpp, ret))
break;
first_node = false;
}
if (!first_node) {
ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s",
indent * HIERARCHY_INDENT, "");
if (advance_hpp_check(&dummy_hpp, ret))
return ret;
}
first_node = true;
list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
if (!first_node) {
ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " / ");
if (advance_hpp_check(&dummy_hpp, ret))
break;
}
first_node = false;
first_col = true;
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
char *start;
if (perf_hpp__should_skip(fmt, hists))
continue;
if (!first_col) {
ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "+");
if (advance_hpp_check(&dummy_hpp, ret))
break;
}
first_col = false;
ret = fmt->header(fmt, &dummy_hpp, hists, 0, NULL);
dummy_hpp.buf[ret] = '\0';
start = strim(dummy_hpp.buf);
ret = strlen(start);
if (start != dummy_hpp.buf)
memmove(dummy_hpp.buf, start, ret + 1);
if (advance_hpp_check(&dummy_hpp, ret))
break;
}
}
return ret;
}
static void hists_browser__hierarchy_headers(struct hist_browser *browser)
{
char headers[1024];
hists_browser__scnprintf_hierarchy_headers(browser, headers,
sizeof(headers));
ui_browser__gotorc_title(&browser->b, 0, 0);
ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
}
static void hists_browser__headers(struct hist_browser *browser)
{
struct hists *hists = browser->hists;
struct perf_hpp_list *hpp_list = hists->hpp_list;
int line;
for (line = 0; line < hpp_list->nr_header_lines; line++) {
char headers[1024];
hists_browser__scnprintf_headers(browser, headers,
sizeof(headers), line);
ui_browser__gotorc_title(&browser->b, line, 0);
ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
}
}
static void hist_browser__show_headers(struct hist_browser *browser)
{
if (symbol_conf.report_hierarchy)
hists_browser__hierarchy_headers(browser);
else
hists_browser__headers(browser);
}
static void ui_browser__hists_init_top(struct ui_browser *browser)
{
if (browser->top == NULL) {
struct hist_browser *hb;
hb = container_of(browser, struct hist_browser, b);
browser->top = rb_first_cached(&hb->hists->entries);
}
}
static unsigned int hist_browser__refresh(struct ui_browser *browser)
{
unsigned row = 0;
struct rb_node *nd;
struct hist_browser *hb = container_of(browser, struct hist_browser, b);
if (hb->show_headers)
hist_browser__show_headers(hb);
ui_browser__hists_init_top(browser);
hb->he_selection = NULL;
hb->selection = NULL;
for (nd = browser->top; nd; nd = rb_hierarchy_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent;
if (h->filtered) {
/* let it move to sibling */
h->unfolded = false;
continue;
}
if (symbol_conf.report_individual_block)
percent = block_info__total_cycles_percent(h);
else
percent = hist_entry__get_percent_limit(h);
if (percent < hb->min_pcnt)
continue;
if (symbol_conf.report_hierarchy) {
row += hist_browser__show_hierarchy_entry(hb, h, row,
h->depth);
if (row == browser->rows)
break;
if (h->has_no_entry) {
hist_browser__show_no_entry(hb, row, h->depth + 1);
row++;
}
} else {
row += hist_browser__show_entry(hb, h, row);
}
if (row == browser->rows)
break;
}
return row;
}
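/*
 * Return the first entry at or after @nd that is neither filtered out
 * nor below @min_pcnt, or NULL if none remains.
 */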
static struct rb_node *hists__filter_entries(struct rb_node *nd,
float min_pcnt)
{
while (nd != NULL) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent = hist_entry__get_percent_limit(h);
if (!h->filtered && percent >= min_pcnt)
return nd;
/*
 * If it's filtered, all of its children were also filtered,
 * so move to the sibling node.
 */
if (rb_next(nd))
nd = rb_next(nd);
else
nd = rb_hierarchy_next(nd);
}
return NULL;
}
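/* As hists__filter_entries(), but walking backwards through the hierarchy. */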
static struct rb_node *hists__filter_prev_entries(struct rb_node *nd,
float min_pcnt)
{
while (nd != NULL) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent = hist_entry__get_percent_limit(h);
if (!h->filtered && percent >= min_pcnt)
return nd;
nd = rb_hierarchy_prev(nd);
}
return NULL;
}
static void ui_browser__hists_seek(struct ui_browser *browser,
off_t offset, int whence)
{
struct hist_entry *h;
struct rb_node *nd;
bool first = true;
struct hist_browser *hb;
hb = container_of(browser, struct hist_browser, b);
if (browser->nr_entries == 0)
return;
ui_browser__hists_init_top(browser);
switch (whence) {
case SEEK_SET:
nd = hists__filter_entries(rb_first(browser->entries),
hb->min_pcnt);
break;
case SEEK_CUR:
nd = browser->top;
goto do_offset;
case SEEK_END:
nd = rb_hierarchy_last(rb_last(browser->entries));
nd = hists__filter_prev_entries(nd, hb->min_pcnt);
first = false;
break;
default:
return;
}
/*
 * Moves not relative to the first visible entry invalidate its
 * row_offset:
 */
h = rb_entry(browser->top, struct hist_entry, rb_node);
h->row_offset = 0;
/*
 * Here we have to check if nd is expanded (+); if it is, we can't go
 * to the next top-level hist_entry; instead we must compute an offset
 * of what _not_ to show and leave the first visible entry unchanged.
 *
 * This offset increases when we are going from top to bottom and
 * decreases when we're going from bottom to top.
 *
 * As we don't have backpointers to the top level in the callchains
 * structure, we need to always print the whole hist_entry callchain,
 * skipping the first ones that are before the first visible entry
 * and stopping when we have printed enough lines to fill the screen.
 */
do_offset:
if (!nd)
return;
if (offset > 0) {
do {
h = rb_entry(nd, struct hist_entry, rb_node);
if (h->unfolded && h->leaf) {
u16 remaining = h->nr_rows - h->row_offset;
if (offset > remaining) {
offset -= remaining;
h->row_offset = 0;
} else {
h->row_offset += offset;
offset = 0;
browser->top = nd;
break;
}
}
nd = hists__filter_entries(rb_hierarchy_next(nd),
hb->min_pcnt);
if (nd == NULL)
break;
--offset;
browser->top = nd;
} while (offset != 0);
} else if (offset < 0) {
while (1) {
h = rb_entry(nd, struct hist_entry, rb_node);
if (h->unfolded && h->leaf) {
if (first) {
if (-offset > h->row_offset) {
offset += h->row_offset;
h->row_offset = 0;
} else {
h->row_offset += offset;
offset = 0;
browser->top = nd;
break;
}
} else {
if (-offset > h->nr_rows) {
offset += h->nr_rows;
h->row_offset = 0;
} else {
h->row_offset = h->nr_rows + offset;
offset = 0;
browser->top = nd;
break;
}
}
}
nd = hists__filter_prev_entries(rb_hierarchy_prev(nd),
hb->min_pcnt);
if (nd == NULL)
break;
++offset;
browser->top = nd;
if (offset == 0) {
/*
 * Last unfiltered hist_entry; if it is
 * unfolded, row_offset should point at
 * its last entry.
 */
h = rb_entry(nd, struct hist_entry, rb_node);
if (h->unfolded && h->leaf)
h->row_offset = h->nr_rows;
break;
}
first = false;
}
} else {
browser->top = nd;
h = rb_entry(nd, struct hist_entry, rb_node);
h->row_offset = 0;
}
}
static int hist_browser__fprintf_callchain(struct hist_browser *browser,
struct hist_entry *he, FILE *fp,
int level)
{
struct callchain_print_arg arg = {
.fp = fp,
};
hist_browser__show_callchain(browser, he, level, 0,
hist_browser__fprintf_callchain_entry, &arg,
hist_browser__check_dump_full);
return arg.printed;
}
static int hist_browser__fprintf_entry(struct hist_browser *browser,
struct hist_entry *he, FILE *fp)
{
char s[8192];
int printed = 0;
char folded_sign = ' ';
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
};
struct perf_hpp_fmt *fmt;
bool first = true;
int ret;
if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
folded_sign = hist_entry__folded(he);
printed += fprintf(fp, "%c ", folded_sign);
}
hists__for_each_format(browser->hists, fmt) {
if (perf_hpp__should_skip(fmt, he->hists))
continue;
if (!first) {
ret = scnprintf(hpp.buf, hpp.size, " ");
advance_hpp(&hpp, ret);
} else
first = false;
ret = fmt->entry(fmt, &hpp, he);
ret = hist_entry__snprintf_alignment(he, &hpp, fmt, ret);
advance_hpp(&hpp, ret);
}
printed += fprintf(fp, "%s\n", s);
if (folded_sign == '-')
printed += hist_browser__fprintf_callchain(browser, he, fp, 1);
return printed;
}
static int hist_browser__fprintf_hierarchy_entry(struct hist_browser *browser,
struct hist_entry *he,
FILE *fp, int level)
{
char s[8192];
int printed = 0;
char folded_sign = ' ';
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
};
struct perf_hpp_fmt *fmt;
struct perf_hpp_list_node *fmt_node;
bool first = true;
int ret;
int hierarchy_indent = (he->hists->nr_hpp_node - 2) * HIERARCHY_INDENT;
printed = fprintf(fp, "%*s", level * HIERARCHY_INDENT, "");
folded_sign = hist_entry__folded(he);
printed += fprintf(fp, "%c", folded_sign);
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&he->hists->hpp_formats,
struct perf_hpp_list_node, list);
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (!first) {
ret = scnprintf(hpp.buf, hpp.size, " ");
advance_hpp(&hpp, ret);
} else
first = false;
ret = fmt->entry(fmt, &hpp, he);
advance_hpp(&hpp, ret);
}
ret = scnprintf(hpp.buf, hpp.size, "%*s", hierarchy_indent, "");
advance_hpp(&hpp, ret);
perf_hpp_list__for_each_format(he->hpp_list, fmt) {
ret = scnprintf(hpp.buf, hpp.size, " ");
advance_hpp(&hpp, ret);
ret = fmt->entry(fmt, &hpp, he);
advance_hpp(&hpp, ret);
}
strim(s);
printed += fprintf(fp, "%s\n", s);
if (he->leaf && folded_sign == '-') {
printed += hist_browser__fprintf_callchain(browser, he, fp,
he->depth + 1);
}
return printed;
}
static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp)
{
struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries),
browser->min_pcnt);
int printed = 0;
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (symbol_conf.report_hierarchy) {
printed += hist_browser__fprintf_hierarchy_entry(browser,
h, fp,
h->depth);
} else {
printed += hist_browser__fprintf_entry(browser, h, fp);
}
nd = hists__filter_entries(rb_hierarchy_next(nd),
browser->min_pcnt);
}
return printed;
}
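/* Dump the current histogram into the first free perf.hist.N file in the CWD. */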
static int hist_browser__dump(struct hist_browser *browser)
{
char filename[64];
FILE *fp;
while (1) {
scnprintf(filename, sizeof(filename), "perf.hist.%d", browser->print_seq);
if (access(filename, F_OK))
break;
/*
* XXX: Just an arbitrary lazy upper limit
*/
if (++browser->print_seq == 8192) {
ui_helpline__fpush("Too many perf.hist.N files, nothing written!");
return -1;
}
}
fp = fopen(filename, "w");
if (fp == NULL) {
char bf[64];
const char *err = str_error_r(errno, bf, sizeof(bf));
ui_helpline__fpush("Couldn't write to %s: %s", filename, err);
return -1;
}
++browser->print_seq;
hist_browser__fprintf(browser, fp);
fclose(fp);
ui_helpline__fpush("%s written!", filename);
return 0;
}
void hist_browser__init(struct hist_browser *browser,
struct hists *hists)
{
struct perf_hpp_fmt *fmt;
browser->hists = hists;
browser->b.refresh = hist_browser__refresh;
browser->b.refresh_dimensions = hist_browser__refresh_dimensions;
browser->b.seek = ui_browser__hists_seek;
browser->b.use_navkeypressed = true;
browser->show_headers = symbol_conf.show_hist_headers;
hist_browser__set_title_space(browser);
if (symbol_conf.report_hierarchy) {
struct perf_hpp_list_node *fmt_node;
/* count overhead columns (in the first node) */
fmt_node = list_first_entry(&hists->hpp_formats,
struct perf_hpp_list_node, list);
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
++browser->b.columns;
/* add a single column for the whole hierarchy's sort keys */
++browser->b.columns;
} else {
hists__for_each_format(hists, fmt)
++browser->b.columns;
}
hists__reset_column_width(hists);
}
struct hist_browser *hist_browser__new(struct hists *hists)
{
struct hist_browser *browser = zalloc(sizeof(*browser));
if (browser)
hist_browser__init(browser, hists);
return browser;
}
static struct hist_browser *
perf_evsel_browser__new(struct evsel *evsel,
struct hist_browser_timer *hbt,
struct perf_env *env,
struct annotation_options *annotation_opts)
{
struct hist_browser *browser = hist_browser__new(evsel__hists(evsel));
if (browser) {
browser->hbt = hbt;
browser->env = env;
browser->title = hists_browser__scnprintf_title;
browser->annotation_opts = annotation_opts;
}
return browser;
}
void hist_browser__delete(struct hist_browser *browser)
{
free(browser);
}
static struct hist_entry *hist_browser__selected_entry(struct hist_browser *browser)
{
return browser->he_selection;
}
static struct thread *hist_browser__selected_thread(struct hist_browser *browser)
{
return browser->he_selection->thread;
}
static struct res_sample *hist_browser__selected_res_sample(struct hist_browser *browser)
{
return browser->he_selection ? browser->he_selection->res_samples : NULL;
}
/* Check whether the browser is for 'top' or 'report' */
static inline bool is_report_browser(void *timer)
{
return timer == NULL;
}
static int hists_browser__scnprintf_title(struct hist_browser *browser, char *bf, size_t size)
{
struct hist_browser_timer *hbt = browser->hbt;
int printed = __hists__scnprintf_title(browser->hists, bf, size, !is_report_browser(hbt));
if (!is_report_browser(hbt)) {
struct perf_top *top = hbt->arg;
printed += scnprintf(bf + printed, size - printed,
" lost: %" PRIu64 "/%" PRIu64,
top->lost, top->lost_total);
printed += scnprintf(bf + printed, size - printed,
" drop: %" PRIu64 "/%" PRIu64,
top->drop, top->drop_total);
if (top->zero)
printed += scnprintf(bf + printed, size - printed, " [z]");
perf_top__reset_sample_counters(top);
}
return printed;
}
static inline void free_popup_options(char **options, int n)
{
int i;
for (i = 0; i < n; ++i)
zfree(&options[i]);
}
/*
 * Only runtime switching of the perf data file makes "input_name" point
 * to a malloc'ed buffer, so use the "is_input_name_malloced" flag to decide
 * whether free() must be called on the current "input_name" during the switch.
 */
static bool is_input_name_malloced = false;
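/*
 * Scan $PWD for perf data files (identified by their magic number) and
 * let the user pick one to switch to via a popup menu.
 */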
static int switch_data_file(void)
{
char *pwd, *options[32], *abs_path[32], *tmp;
DIR *pwd_dir;
int nr_options = 0, choice = -1, ret = -1;
struct dirent *dent;
pwd = getenv("PWD");
if (!pwd)
return ret;
pwd_dir = opendir(pwd);
if (!pwd_dir)
return ret;
memset(options, 0, sizeof(options));
memset(abs_path, 0, sizeof(abs_path));
while ((dent = readdir(pwd_dir))) {
char path[PATH_MAX];
u64 magic;
char *name = dent->d_name;
FILE *file;
if (!(dent->d_type == DT_REG))
continue;
snprintf(path, sizeof(path), "%s/%s", pwd, name);
file = fopen(path, "r");
if (!file)
continue;
if (fread(&magic, 1, 8, file) < 8)
goto close_file_and_continue;
if (is_perf_magic(magic)) {
options[nr_options] = strdup(name);
if (!options[nr_options])
goto close_file_and_continue;
abs_path[nr_options] = strdup(path);
if (!abs_path[nr_options]) {
zfree(&options[nr_options]);
ui__warning("Can't search all data files due to memory shortage.\n");
fclose(file);
break;
}
nr_options++;
}
close_file_and_continue:
fclose(file);
if (nr_options >= 32) {
ui__warning("Too many perf data files in PWD!\n"
"Only the first 32 files will be listed.\n");
break;
}
}
closedir(pwd_dir);
if (nr_options) {
choice = ui__popup_menu(nr_options, options, NULL);
if (choice < nr_options && choice >= 0) {
tmp = strdup(abs_path[choice]);
if (tmp) {
if (is_input_name_malloced)
free((void *)input_name);
input_name = tmp;
is_input_name_malloced = true;
ret = 0;
} else
ui__warning("Data switch failed due to memory shortage!\n");
}
}
free_popup_options(options, nr_options);
free_popup_options(abs_path, nr_options);
return ret;
}
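/* A context menu entry: the handler function plus the arguments it needs. */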
struct popup_action {
unsigned long time;
struct thread *thread;
struct map_symbol ms;
int socket;
struct evsel *evsel;
enum rstype rstype;
int (*fn)(struct hist_browser *browser, struct popup_action *act);
};
static int
do_annotate(struct hist_browser *browser, struct popup_action *act)
{
struct evsel *evsel;
struct annotation *notes;
struct hist_entry *he;
int err;
if (!browser->annotation_opts->objdump_path &&
perf_env__lookup_objdump(browser->env, &browser->annotation_opts->objdump_path))
return 0;
notes = symbol__annotation(act->ms.sym);
if (!notes->src)
return 0;
if (browser->block_evsel)
evsel = browser->block_evsel;
else
evsel = hists_to_evsel(browser->hists);
err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt,
browser->annotation_opts);
he = hist_browser__selected_entry(browser);
/*
 * Offer the option to annotate the other branch source or target
 * (if they exist) when returning from annotate.
 */
if ((err == 'q' || err == CTRL('c')) && he->branch_info)
return 1;
ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
if (err)
ui_browser__handle_resize(&browser->b);
return 0;
}
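/*
 * Create a dummy symbol named after the address so that samples landing
 * in otherwise unresolved code can still be annotated.
 */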
static struct symbol *symbol__new_unresolved(u64 addr, struct map *map)
{
struct annotated_source *src;
struct symbol *sym;
char name[64];
snprintf(name, sizeof(name), "%.*" PRIx64, BITS_PER_LONG / 4, addr);
sym = symbol__new(addr, ANNOTATION_DUMMY_LEN, 0, 0, name);
if (sym) {
src = symbol__hists(sym, 1);
if (!src) {
symbol__delete(sym);
return NULL;
}
dso__insert_symbol(map__dso(map), sym);
}
return sym;
}
static int
add_annotate_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
struct map_symbol *ms,
u64 addr)
{
struct dso *dso;
if (!ms->map || (dso = map__dso(ms->map)) == NULL || dso->annotate_warned)
return 0;
if (!ms->sym)
ms->sym = symbol__new_unresolved(addr, ms->map);
if (ms->sym == NULL || symbol__annotation(ms->sym)->src == NULL)
return 0;
if (asprintf(optstr, "Annotate %s", ms->sym->name) < 0)
return 0;
act->ms = *ms;
act->fn = do_annotate;
return 1;
}
static int
do_zoom_thread(struct hist_browser *browser, struct popup_action *act)
{
struct thread *thread = act->thread;
if ((!hists__has(browser->hists, thread) &&
!hists__has(browser->hists, comm)) || thread == NULL)
return 0;
if (browser->hists->thread_filter) {
pstack__remove(browser->pstack, &browser->hists->thread_filter);
perf_hpp__set_elide(HISTC_THREAD, false);
thread__zput(browser->hists->thread_filter);
ui_helpline__pop();
} else {
const char *comm_set_str =
thread__comm_set(thread) ? thread__comm_str(thread) : "";
if (hists__has(browser->hists, thread)) {
ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s(%d) thread\"",
comm_set_str, thread__tid(thread));
} else {
ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s thread\"",
comm_set_str);
}
browser->hists->thread_filter = thread__get(thread);
perf_hpp__set_elide(HISTC_THREAD, false);
pstack__push(browser->pstack, &browser->hists->thread_filter);
}
hists__filter_by_thread(browser->hists);
hist_browser__reset(browser);
return 0;
}
static int
add_thread_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, struct thread *thread)
{
int ret;
const char *comm_set_str, *in_out;
if ((!hists__has(browser->hists, thread) &&
!hists__has(browser->hists, comm)) || thread == NULL)
return 0;
in_out = browser->hists->thread_filter ? "out of" : "into";
comm_set_str = thread__comm_set(thread) ? thread__comm_str(thread) : "";
if (hists__has(browser->hists, thread)) {
ret = asprintf(optstr, "Zoom %s %s(%d) thread",
in_out, comm_set_str, thread__tid(thread));
} else {
ret = asprintf(optstr, "Zoom %s %s thread", in_out, comm_set_str);
}
if (ret < 0)
return 0;
act->thread = thread;
act->fn = do_zoom_thread;
return 1;
}
static int hists_browser__zoom_map(struct hist_browser *browser, struct map *map)
{
if (!hists__has(browser->hists, dso) || map == NULL)
return 0;
if (browser->hists->dso_filter) {
pstack__remove(browser->pstack, &browser->hists->dso_filter);
perf_hpp__set_elide(HISTC_DSO, false);
browser->hists->dso_filter = NULL;
ui_helpline__pop();
} else {
struct dso *dso = map__dso(map);
ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s DSO\"",
__map__is_kernel(map) ? "the Kernel" : dso->short_name);
browser->hists->dso_filter = dso;
perf_hpp__set_elide(HISTC_DSO, true);
pstack__push(browser->pstack, &browser->hists->dso_filter);
}
hists__filter_by_dso(browser->hists);
hist_browser__reset(browser);
return 0;
}
static int
do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
{
return hists_browser__zoom_map(browser, act->ms.map);
}
static int
add_dso_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, struct map *map)
{
if (!hists__has(browser->hists, dso) || map == NULL)
return 0;
if (asprintf(optstr, "Zoom %s %s DSO (use the 'k' hotkey to zoom directly into the kernel)",
browser->hists->dso_filter ? "out of" : "into",
__map__is_kernel(map) ? "the Kernel" : map__dso(map)->short_name) < 0)
return 0;
act->ms.map = map;
act->fn = do_zoom_dso;
return 1;
}
static int do_toggle_callchain(struct hist_browser *browser, struct popup_action *act __maybe_unused)
{
hist_browser__toggle_fold(browser);
return 0;
}
static int add_callchain_toggle_opt(struct hist_browser *browser, struct popup_action *act, char **optstr)
{
char sym_name[512];
if (!hist_browser__selection_has_children(browser))
return 0;
if (asprintf(optstr, "%s [%s] callchain (one level, same as '+' hotkey, use 'e'/'c' for the whole main level entry)",
hist_browser__selection_unfolded(browser) ? "Collapse" : "Expand",
hist_browser__selection_sym_name(browser, sym_name, sizeof(sym_name))) < 0)
return 0;
act->fn = do_toggle_callchain;
return 1;
}
static int
do_browse_map(struct hist_browser *browser __maybe_unused,
struct popup_action *act)
{
map__browse(act->ms.map);
return 0;
}
static int
add_map_opt(struct hist_browser *browser,
struct popup_action *act, char **optstr, struct map *map)
{
if (!hists__has(browser->hists, dso) || map == NULL)
return 0;
if (asprintf(optstr, "Browse map details") < 0)
return 0;
act->ms.map = map;
act->fn = do_browse_map;
return 1;
}
static int
do_run_script(struct hist_browser *browser __maybe_unused,
struct popup_action *act)
{
char *script_opt;
int len;
int n = 0;
len = 100;
if (act->thread)
len += strlen(thread__comm_str(act->thread));
else if (act->ms.sym)
len += strlen(act->ms.sym->name);
script_opt = malloc(len);
if (!script_opt)
return -1;
script_opt[0] = 0;
if (act->thread) {
n = scnprintf(script_opt, len, " -c %s ",
thread__comm_str(act->thread));
} else if (act->ms.sym) {
n = scnprintf(script_opt, len, " -S %s ",
act->ms.sym->name);
}
if (act->time) {
char start[32], end[32];
unsigned long starttime = act->time;
unsigned long endtime = act->time + symbol_conf.time_quantum;
if (starttime == endtime) { /* Display 1ms as fallback */
starttime -= 1*NSEC_PER_MSEC;
endtime += 1*NSEC_PER_MSEC;
}
timestamp__scnprintf_usec(starttime, start, sizeof start);
timestamp__scnprintf_usec(endtime, end, sizeof end);
n += snprintf(script_opt + n, len - n, " --time %s,%s", start, end);
}
script_browse(script_opt, act->evsel);
free(script_opt);
return 0;
}
static int
do_res_sample_script(struct hist_browser *browser __maybe_unused,
struct popup_action *act)
{
struct hist_entry *he;
he = hist_browser__selected_entry(browser);
res_sample_browse(he->res_samples, he->num_res, act->evsel, act->rstype);
return 0;
}
static int
add_script_opt_2(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
struct thread *thread, struct symbol *sym,
struct evsel *evsel, const char *tstr)
{
if (thread) {
if (asprintf(optstr, "Run scripts for samples of thread [%s]%s",
thread__comm_str(thread), tstr) < 0)
return 0;
} else if (sym) {
if (asprintf(optstr, "Run scripts for samples of symbol [%s]%s",
sym->name, tstr) < 0)
return 0;
} else {
if (asprintf(optstr, "Run scripts for all samples%s", tstr) < 0)
return 0;
}
act->thread = thread;
act->ms.sym = sym;
act->evsel = evsel;
act->fn = do_run_script;
return 1;
}
static int
add_script_opt(struct hist_browser *browser,
struct popup_action *act, char **optstr,
struct thread *thread, struct symbol *sym,
struct evsel *evsel)
{
int n, j;
struct hist_entry *he;
n = add_script_opt_2(browser, act, optstr, thread, sym, evsel, "");
he = hist_browser__selected_entry(browser);
if (sort_order && strstr(sort_order, "time")) {
char tstr[128];
optstr++;
act++;
j = sprintf(tstr, " in ");
j += timestamp__scnprintf_usec(he->time, tstr + j,
sizeof tstr - j);
j += sprintf(tstr + j, "-");
timestamp__scnprintf_usec(he->time + symbol_conf.time_quantum,
tstr + j, sizeof tstr - j);
n += add_script_opt_2(browser, act, optstr, thread, sym,
evsel, tstr);
act->time = he->time;
}
return n;
}
static int
add_res_sample_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
struct res_sample *res_sample,
struct evsel *evsel,
enum rstype type)
{
if (!res_sample)
return 0;
if (asprintf(optstr, "Show context for individual samples %s",
type == A_ASM ? "with assembler" :
type == A_SOURCE ? "with source" : "") < 0)
return 0;
act->fn = do_res_sample_script;
act->evsel = evsel;
act->rstype = type;
return 1;
}
static int
do_switch_data(struct hist_browser *browser __maybe_unused,
struct popup_action *act __maybe_unused)
{
if (switch_data_file()) {
ui__warning("Won't switch the data files because\n"
"no valid data file was selected!\n");
return 0;
}
return K_SWITCH_INPUT_DATA;
}
static int
add_switch_opt(struct hist_browser *browser,
struct popup_action *act, char **optstr)
{
if (!is_report_browser(browser->hbt))
return 0;
if (asprintf(optstr, "Switch to another data file in PWD") < 0)
return 0;
act->fn = do_switch_data;
return 1;
}
static int
do_exit_browser(struct hist_browser *browser __maybe_unused,
struct popup_action *act __maybe_unused)
{
return 0;
}
static int
add_exit_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr)
{
if (asprintf(optstr, "Exit") < 0)
return 0;
act->fn = do_exit_browser;
return 1;
}
static int
do_zoom_socket(struct hist_browser *browser, struct popup_action *act)
{
if (!hists__has(browser->hists, socket) || act->socket < 0)
return 0;
if (browser->hists->socket_filter > -1) {
pstack__remove(browser->pstack, &browser->hists->socket_filter);
browser->hists->socket_filter = -1;
perf_hpp__set_elide(HISTC_SOCKET, false);
} else {
browser->hists->socket_filter = act->socket;
perf_hpp__set_elide(HISTC_SOCKET, true);
pstack__push(browser->pstack, &browser->hists->socket_filter);
}
hists__filter_by_socket(browser->hists);
hist_browser__reset(browser);
return 0;
}
static int
add_socket_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, int socket_id)
{
if (!hists__has(browser->hists, socket) || socket_id < 0)
return 0;
if (asprintf(optstr, "Zoom %s Processor Socket %d",
(browser->hists->socket_filter > -1) ? "out of" : "into",
socket_id) < 0)
return 0;
act->socket = socket_id;
act->fn = do_zoom_socket;
return 1;
}
static void hist_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
struct rb_node *nd = rb_first_cached(&hb->hists->entries);
if (hb->min_pcnt == 0 && !symbol_conf.report_hierarchy) {
hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries;
return;
}
while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) {
nr_entries++;
nd = rb_hierarchy_next(nd);
}
hb->nr_non_filtered_entries = nr_entries;
hb->nr_hierarchy_entries = nr_entries;
}
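/*
 * Apply a new percent limit: resort the callchains against the new
 * threshold and reset every entry's folding state.
 */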
static void hist_browser__update_percent_limit(struct hist_browser *hb,
double percent)
{
struct hist_entry *he;
struct rb_node *nd = rb_first_cached(&hb->hists->entries);
u64 total = hists__total_period(hb->hists);
u64 min_callchain_hits = total * (percent / 100);
hb->min_pcnt = callchain_param.min_percent = percent;
while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) {
he = rb_entry(nd, struct hist_entry, rb_node);
if (he->has_no_entry) {
he->has_no_entry = false;
he->nr_rows = 0;
}
if (!he->leaf || !hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
goto next;
if (callchain_param.mode == CHAIN_GRAPH_REL) {
total = he->stat.period;
if (symbol_conf.cumulate_callchain)
total = he->stat_acc->period;
min_callchain_hits = total * (percent / 100);
}
callchain_param.sort(&he->sorted_chain, he->callchain,
min_callchain_hits, &callchain_param);
next:
nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
/* force re-evaluation of the folding state of callchains */
he->init_have_children = false;
hist_entry__set_folding(he, hb, false);
}
}
static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *helpline,
bool left_exits, struct hist_browser_timer *hbt, float min_pcnt,
struct perf_env *env, bool warn_lost_event,
struct annotation_options *annotation_opts)
{
struct hists *hists = evsel__hists(evsel);
struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts);
struct branch_info *bi = NULL;
#define MAX_OPTIONS 16
char *options[MAX_OPTIONS];
struct popup_action actions[MAX_OPTIONS];
int nr_options = 0;
int key = -1;
char buf[128];
int delay_secs = hbt ? hbt->refresh : 0;
#define HIST_BROWSER_HELP_COMMON \
"h/?/F1 Show this window\n" \
"UP/DOWN/PGUP\n" \
"PGDN/SPACE Navigate\n" \
"q/ESC/CTRL+C Exit browser or go back to previous screen\n\n" \
"For multiple event sessions:\n\n" \
"TAB/UNTAB Switch events\n\n" \
"For symbolic views (--sort has sym):\n\n" \
"ENTER Zoom into DSO/Threads & Annotate current symbol\n" \
"ESC Zoom out\n" \
"+ Expand/Collapse one callchain level\n" \
"a Annotate current symbol\n" \
"C Collapse all callchains\n" \
"d Zoom into current DSO\n" \
"e Expand/Collapse main entry callchains\n" \
"E Expand all callchains\n" \
"F Toggle percentage of filtered entries\n" \
"H Display column headers\n" \
"k Zoom into the kernel map\n" \
"L Change percent limit\n" \
"m Display context menu\n" \
"S Zoom into current Processor Socket\n" \
/* help messages are sorted by lexical order of the hotkey */
static const char report_help[] = HIST_BROWSER_HELP_COMMON
"i Show header information\n"
"P Print histograms to perf.hist.N\n"
"r Run available scripts\n"
"s Switch to another data file in PWD\n"
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
"/ Filter symbol by name\n"
"0-9 Sort by event n in group";
static const char top_help[] = HIST_BROWSER_HELP_COMMON
"P Print histograms to perf.hist.N\n"
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
"z Toggle zeroing of samples\n"
"f Enable/Disable events\n"
"/ Filter symbol by name";
if (browser == NULL)
return -1;
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
if (min_pcnt)
browser->min_pcnt = min_pcnt;
hist_browser__update_nr_entries(browser);
browser->pstack = pstack__new(3);
if (browser->pstack == NULL)
goto out;
ui_helpline__push(helpline);
memset(options, 0, sizeof(options));
memset(actions, 0, sizeof(actions));
if (symbol_conf.col_width_list_str)
perf_hpp__set_user_width(symbol_conf.col_width_list_str);
if (!is_report_browser(hbt))
browser->b.no_samples_msg = "Collecting samples...";
while (1) {
struct thread *thread = NULL;
struct map *map = NULL;
int choice;
int socked_id = -1;
key = 0; // reset key
do_hotkey: // key came straight from the options menu via ui__popup_menu()
choice = nr_options = 0;
key = hist_browser__run(browser, helpline, warn_lost_event, key);
if (browser->he_selection != NULL) {
thread = hist_browser__selected_thread(browser);
map = browser->selection->map;
socked_id = browser->he_selection->socket;
}
switch (key) {
case K_TAB:
case K_UNTAB:
if (nr_events == 1)
continue;
/*
 * Exit the browser and let hists__browser_tree
 * go to the next or previous event.
 */
goto out_free_stack;
case '0' ... '9':
if (!symbol_conf.event_group ||
evsel->core.nr_members < 2) {
snprintf(buf, sizeof(buf),
"Sort by index only available with group events!");
helpline = buf;
continue;
}
if (key - '0' == symbol_conf.group_sort_idx)
continue;
symbol_conf.group_sort_idx = key - '0';
if (symbol_conf.group_sort_idx >= evsel->core.nr_members) {
snprintf(buf, sizeof(buf),
"Max event group index to sort is %d (index from 0 to %d)",
evsel->core.nr_members - 1,
evsel->core.nr_members - 1);
helpline = buf;
continue;
}
key = K_RELOAD;
goto out_free_stack;
case 'a':
if (!hists__has(hists, sym)) {
ui_browser__warning(&browser->b, delay_secs * 2,
"Annotation is only available for symbolic views, "
"include \"sym*\" in --sort to use it.");
continue;
}
if (!browser->selection ||
!browser->selection->map ||
!map__dso(browser->selection->map) ||
map__dso(browser->selection->map)->annotate_warned) {
continue;
}
if (!browser->selection->sym) {
if (!browser->he_selection)
continue;
if (sort__mode == SORT_MODE__BRANCH) {
bi = browser->he_selection->branch_info;
if (!bi || !bi->to.ms.map)
continue;
actions->ms.sym = symbol__new_unresolved(bi->to.al_addr, bi->to.ms.map);
actions->ms.map = bi->to.ms.map;
} else {
actions->ms.sym = symbol__new_unresolved(browser->he_selection->ip,
browser->selection->map);
actions->ms.map = browser->selection->map;
}
if (!actions->ms.sym)
continue;
} else {
if (symbol__annotation(browser->selection->sym)->src == NULL) {
ui_browser__warning(&browser->b, delay_secs * 2,
"No samples for the \"%s\" symbol.\n\n"
"Probably appeared just in a callchain",
browser->selection->sym->name);
continue;
}
actions->ms.map = browser->selection->map;
actions->ms.sym = browser->selection->sym;
}
do_annotate(browser, actions);
continue;
case 'P':
hist_browser__dump(browser);
continue;
case 'd':
actions->ms.map = map;
do_zoom_dso(browser, actions);
continue;
case 'k':
if (browser->selection != NULL)
hists_browser__zoom_map(browser,
maps__machine(browser->selection->maps)->vmlinux_map);
continue;
case 'V':
verbose = (verbose + 1) % 4;
browser->show_dso = verbose > 0;
ui_helpline__fpush("Verbosity level set to %d\n",
verbose);
continue;
case 't':
actions->thread = thread;
do_zoom_thread(browser, actions);
continue;
case 'S':
actions->socket = socked_id;
do_zoom_socket(browser, actions);
continue;
case '/':
if (ui_browser__input_window("Symbol to show",
"Please enter the name of the symbol you want to see.\n"
"To remove the filter later, press / + ENTER.",
buf, "ENTER: OK, ESC: Cancel",
delay_secs * 2) == K_ENTER) {
hists->symbol_filter_str = *buf ? buf : NULL;
hists__filter_by_symbol(hists);
hist_browser__reset(browser);
}
continue;
case 'r':
if (is_report_browser(hbt)) {
actions->thread = NULL;
actions->ms.sym = NULL;
do_run_script(browser, actions);
}
continue;
case 's':
if (is_report_browser(hbt)) {
key = do_switch_data(browser, actions);
if (key == K_SWITCH_INPUT_DATA)
goto out_free_stack;
}
continue;
case 'i':
/* env->arch is NULL for live-mode (i.e. perf top) */
if (env->arch)
tui__header_window(env);
continue;
case 'F':
symbol_conf.filter_relative ^= 1;
continue;
case 'z':
if (!is_report_browser(hbt)) {
struct perf_top *top = hbt->arg;
top->zero = !top->zero;
}
continue;
case 'L':
if (ui_browser__input_window("Percent Limit",
"Please enter the percent limit; entries below it will be hidden.",
buf, "ENTER: OK, ESC: Cancel",
delay_secs * 2) == K_ENTER) {
char *end;
double new_percent = strtod(buf, &end);
if (new_percent < 0 || new_percent > 100) {
ui_browser__warning(&browser->b, delay_secs * 2,
"Invalid percent: %.2f", new_percent);
continue;
}
hist_browser__update_percent_limit(browser, new_percent);
hist_browser__reset(browser);
}
continue;
case K_F1:
case 'h':
case '?':
ui_browser__help_window(&browser->b,
is_report_browser(hbt) ? report_help : top_help);
continue;
case K_ENTER:
case K_RIGHT:
case 'm':
/* menu */
break;
case K_ESC:
case K_LEFT: {
const void *top;
if (pstack__empty(browser->pstack)) {
/*
* Go back to the perf_evsel_menu__run or other user
*/
if (left_exits)
goto out_free_stack;
if (key == K_ESC &&
ui_browser__dialog_yesno(&browser->b,
"Do you really want to exit?"))
goto out_free_stack;
continue;
}
actions->ms.map = map;
top = pstack__peek(browser->pstack);
if (top == &browser->hists->dso_filter) {
/*
* No need to set actions->dso here since
* it's just to remove the current filter.
* Ditto for thread below.
*/
do_zoom_dso(browser, actions);
} else if (top == &browser->hists->thread_filter) {
do_zoom_thread(browser, actions);
} else if (top == &browser->hists->socket_filter) {
do_zoom_socket(browser, actions);
}
continue;
}
case 'q':
case CTRL('c'):
goto out_free_stack;
case 'f':
if (!is_report_browser(hbt)) {
struct perf_top *top = hbt->arg;
evlist__toggle_enable(top->evlist);
/*
 * No need to refresh or resort/decay histogram
 * entries if we are not collecting samples:
 */
if (top->evlist->enabled) {
helpline = "Press 'f' to disable the events or 'h' to see other hotkeys";
hbt->refresh = delay_secs;
} else {
helpline = "Press 'f' again to re-enable the events";
hbt->refresh = 0;
}
continue;
}
/* Fall thru */
default:
helpline = "Press '?' for help on key bindings";
continue;
}
if (!hists__has(hists, sym) || browser->selection == NULL)
goto skip_annotation;
if (sort__mode == SORT_MODE__BRANCH) {
if (browser->he_selection)
bi = browser->he_selection->branch_info;
if (bi == NULL)
goto skip_annotation;
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
&bi->from.ms,
bi->from.al_addr);
if (bi->to.ms.sym != bi->from.ms.sym)
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
&bi->to.ms,
bi->to.al_addr);
} else {
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
browser->selection,
browser->he_selection->ip);
}
skip_annotation:
nr_options += add_thread_opt(browser, &actions[nr_options],
&options[nr_options], thread);
nr_options += add_dso_opt(browser, &actions[nr_options],
&options[nr_options], map);
nr_options += add_callchain_toggle_opt(browser, &actions[nr_options], &options[nr_options]);
nr_options += add_map_opt(browser, &actions[nr_options],
&options[nr_options],
browser->selection ?
browser->selection->map : NULL);
nr_options += add_socket_opt(browser, &actions[nr_options],
&options[nr_options],
socked_id);
/* perf script support */
if (!is_report_browser(hbt))
goto skip_scripting;
if (browser->he_selection) {
if (hists__has(hists, thread) && thread) {
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
thread, NULL, evsel);
}
/*
 * Note that browser->selection != NULL
 * when browser->he_selection is not NULL,
 * so we don't need to check browser->selection
 * before fetching browser->selection->sym, as
 * we do before fetching browser->selection->map.
 *
 * See hist_browser__show_entry.
 */
if (hists__has(hists, sym) && browser->selection->sym) {
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
NULL, browser->selection->sym,
evsel);
}
}
nr_options += add_script_opt(browser, &actions[nr_options],
&options[nr_options], NULL, NULL, evsel);
nr_options += add_res_sample_opt(browser, &actions[nr_options],
&options[nr_options],
hist_browser__selected_res_sample(browser),
evsel, A_NORMAL);
nr_options += add_res_sample_opt(browser, &actions[nr_options],
&options[nr_options],
hist_browser__selected_res_sample(browser),
evsel, A_ASM);
nr_options += add_res_sample_opt(browser, &actions[nr_options],
&options[nr_options],
hist_browser__selected_res_sample(browser),
evsel, A_SOURCE);
nr_options += add_switch_opt(browser, &actions[nr_options],
&options[nr_options]);
skip_scripting:
nr_options += add_exit_opt(browser, &actions[nr_options],
&options[nr_options]);
do {
struct popup_action *act;
choice = ui__popup_menu(nr_options, options, &key);
if (choice == -1)
break;
if (choice == nr_options)
goto do_hotkey;
act = &actions[choice];
key = act->fn(browser, act);
} while (key == 1);
if (key == K_SWITCH_INPUT_DATA)
break;
}
out_free_stack:
pstack__delete(browser->pstack);
out:
hist_browser__delete(browser);
free_popup_options(options, MAX_OPTIONS);
return key;
}
struct evsel_menu {
struct ui_browser b;
struct evsel *selection;
struct annotation_options *annotation_opts;
bool lost_events, lost_events_warned;
float min_pcnt;
struct perf_env *env;
};
static void perf_evsel_menu__write(struct ui_browser *browser,
void *entry, int row)
{
struct evsel_menu *menu = container_of(browser,
struct evsel_menu, b);
struct evsel *evsel = list_entry(entry, struct evsel, core.node);
struct hists *hists = evsel__hists(evsel);
bool current_entry = ui_browser__is_current_entry(browser, row);
unsigned long nr_events = hists->stats.nr_samples;
const char *ev_name = evsel__name(evsel);
char bf[256], unit;
const char *warn = " ";
size_t printed;
ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
HE_COLORSET_NORMAL);
if (evsel__is_group_event(evsel)) {
struct evsel *pos;
ev_name = evsel__group_name(evsel);
for_each_group_member(pos, evsel) {
struct hists *pos_hists = evsel__hists(pos);
nr_events += pos_hists->stats.nr_samples;
}
}
nr_events = convert_unit(nr_events, &unit);
printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
unit, unit == ' ' ? "" : " ", ev_name);
ui_browser__printf(browser, "%s", bf);
nr_events = evsel->evlist->stats.nr_events[PERF_RECORD_LOST];
if (nr_events != 0) {
menu->lost_events = true;
if (!current_entry)
ui_browser__set_color(browser, HE_COLORSET_TOP);
nr_events = convert_unit(nr_events, &unit);
printed += scnprintf(bf, sizeof(bf), ": %ld%c%schunks LOST!",
nr_events, unit, unit == ' ' ? "" : " ");
warn = bf;
}
ui_browser__write_nstring(browser, warn, browser->width - printed);
if (current_entry)
menu->selection = evsel;
}
static int perf_evsel_menu__run(struct evsel_menu *menu,
int nr_events, const char *help,
struct hist_browser_timer *hbt,
bool warn_lost_event)
{
struct evlist *evlist = menu->b.priv;
struct evsel *pos;
const char *title = "Available samples";
int delay_secs = hbt ? hbt->refresh : 0;
int key;
if (ui_browser__show(&menu->b, title,
"ESC: exit, ENTER|->: Browse histograms") < 0)
return -1;
while (1) {
key = ui_browser__run(&menu->b, delay_secs);
switch (key) {
case K_TIMER:
if (hbt)
hbt->timer(hbt->arg);
if (!menu->lost_events_warned &&
menu->lost_events &&
warn_lost_event) {
ui_browser__warn_lost_events(&menu->b);
menu->lost_events_warned = true;
}
continue;
case K_RIGHT:
case K_ENTER:
if (!menu->selection)
continue;
pos = menu->selection;
browse_hists:
evlist__set_selected(evlist, pos);
/*
 * Give the calling tool a chance to populate the resorted
 * hists tree for the non-default evsel.
 */
if (hbt)
hbt->timer(hbt->arg);
key = evsel__hists_browse(pos, nr_events, help, true, hbt,
menu->min_pcnt, menu->env,
warn_lost_event,
menu->annotation_opts);
ui_browser__show_title(&menu->b, title);
switch (key) {
case K_TAB:
if (pos->core.node.next == &evlist->core.entries)
pos = evlist__first(evlist);
else
pos = evsel__next(pos);
goto browse_hists;
case K_UNTAB:
if (pos->core.node.prev == &evlist->core.entries)
pos = evlist__last(evlist);
else
pos = evsel__prev(pos);
goto browse_hists;
case K_SWITCH_INPUT_DATA:
case K_RELOAD:
case 'q':
case CTRL('c'):
goto out;
case K_ESC:
default:
continue;
}
case K_LEFT:
continue;
case K_ESC:
if (!ui_browser__dialog_yesno(&menu->b,
"Do you really want to exit?"))
continue;
/* Fall thru */
case 'q':
case CTRL('c'):
goto out;
default:
continue;
}
}
out:
ui_browser__hide(&menu->b);
return key;
}
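/* With event groups enabled, only group leaders appear in the menu. */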
static bool filter_group_entries(struct ui_browser *browser __maybe_unused,
void *entry)
{
struct evsel *evsel = list_entry(entry, struct evsel, core.node);
if (symbol_conf.event_group && !evsel__is_group_leader(evsel))
return true;
return false;
}
static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, const char *help,
struct hist_browser_timer *hbt, float min_pcnt, struct perf_env *env,
bool warn_lost_event, struct annotation_options *annotation_opts)
{
struct evsel *pos;
struct evsel_menu menu = {
.b = {
.entries = &evlist->core.entries,
.refresh = ui_browser__list_head_refresh,
.seek = ui_browser__list_head_seek,
.write = perf_evsel_menu__write,
.filter = filter_group_entries,
.nr_entries = nr_entries,
.priv = evlist,
},
.min_pcnt = min_pcnt,
.env = env,
.annotation_opts = annotation_opts,
};
ui_helpline__push("Press ESC to exit");
evlist__for_each_entry(evlist, pos) {
const char *ev_name = evsel__name(pos);
size_t line_len = strlen(ev_name) + 7;
if (menu.b.width < line_len)
menu.b.width = line_len;
}
return perf_evsel_menu__run(&menu, nr_entries, help,
hbt, warn_lost_event);
}
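/*
 * An evlist counts as a single entry if it has one evsel, or two where
 * the last one is just a dummy event.
 */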
static bool evlist__single_entry(struct evlist *evlist)
{
int nr_entries = evlist->core.nr_entries;
if (nr_entries == 1)
return true;
if (nr_entries == 2) {
struct evsel *last = evlist__last(evlist);
if (evsel__is_dummy_event(last))
return true;
}
return false;
}
int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt,
float min_pcnt, struct perf_env *env, bool warn_lost_event,
struct annotation_options *annotation_opts)
{
int nr_entries = evlist->core.nr_entries;
if (evlist__single_entry(evlist)) {
single_entry: {
struct evsel *first = evlist__first(evlist);
return evsel__hists_browse(first, nr_entries, help, false, hbt, min_pcnt,
env, warn_lost_event, annotation_opts);
}
}
if (symbol_conf.event_group) {
struct evsel *pos;
nr_entries = 0;
evlist__for_each_entry(evlist, pos) {
if (evsel__is_group_leader(pos))
nr_entries++;
}
if (nr_entries == 1)
goto single_entry;
}
return __evlist__tui_browse_hists(evlist, nr_entries, help, hbt, min_pcnt, env,
warn_lost_event, annotation_opts);
}
static int block_hists_browser__title(struct hist_browser *browser, char *bf,
size_t size)
{
struct hists *hists = evsel__hists(browser->block_evsel);
const char *evname = evsel__name(browser->block_evsel);
unsigned long nr_samples = hists->stats.nr_samples;
int ret;
ret = scnprintf(bf, size, "# Samples: %lu", nr_samples);
if (evname)
scnprintf(bf + ret, size - ret, " of event '%s'", evname);
return 0;
}
int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
float min_percent, struct perf_env *env,
struct annotation_options *annotation_opts)
{
struct hists *hists = &bh->block_hists;
struct hist_browser *browser;
int key = -1;
struct popup_action action;
static const char help[] =
" q Quit \n";
browser = hist_browser__new(hists);
if (!browser)
return -1;
browser->block_evsel = evsel;
browser->title = block_hists_browser__title;
browser->min_pcnt = min_percent;
browser->env = env;
browser->annotation_opts = annotation_opts;
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
memset(&action, 0, sizeof(action));
while (1) {
key = hist_browser__run(browser, "? - help", true, 0);
switch (key) {
case 'q':
goto out;
case '?':
ui_browser__help_window(&browser->b, help);
break;
case 'a':
case K_ENTER:
if (!browser->selection ||
!browser->selection->sym) {
continue;
}
action.ms.map = browser->selection->map;
action.ms.sym = browser->selection->sym;
do_annotate(browser, &action);
continue;
default:
break;
}
}
out:
hist_browser__delete(browser);
return 0;
}
| linux-master | tools/perf/ui/browsers/hists.c |
// SPDX-License-Identifier: GPL-2.0
/* Display a menu with individual samples to browse with perf script */
#include "hist.h"
#include "evsel.h"
#include "hists.h"
#include "sort.h"
#include "config.h"
#include "time-utils.h"
#include "../util.h"
#include "../../util/util.h" // perf_exe()
#include "../../perf.h"
#include <stdlib.h>
#include <string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
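/* Time window (in ns) shown around a sample; tunable via the "samples.context" config key. */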
static u64 context_len = 10 * NSEC_PER_MSEC;
static int res_sample_config(const char *var, const char *value, void *data __maybe_unused)
{
if (!strcmp(var, "samples.context"))
return perf_config_u64(&context_len, var, value);
return 0;
}
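/*
 * The window can be changed via perf config; an illustrative
 * ~/.perfconfig snippet (value in nanoseconds, here 20ms):
 *
 *	[samples]
 *		context = 20000000
 */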
void res_sample_init(void)
{
perf_config(res_sample_config, NULL);
}
int res_sample_browse(struct res_sample *res_samples, int num_res,
struct evsel *evsel, enum rstype rstype)
{
char **names;
int i, n;
int choice;
char *cmd;
char pbuf[256], tidbuf[32], cpubuf[32];
const char *perf = perf_exe(pbuf, sizeof pbuf);
char trange[128], tsample[64];
struct res_sample *r;
char extra_format[256];
names = calloc(num_res, sizeof(char *));
if (!names)
return -1;
for (i = 0; i < num_res; i++) {
char tbuf[64];
timestamp__scnprintf_nsec(res_samples[i].time, tbuf, sizeof tbuf);
if (asprintf(&names[i], "%s: CPU %d tid %d", tbuf,
res_samples[i].cpu, res_samples[i].tid) < 0) {
while (--i >= 0)
zfree(&names[i]);
free(names);
return -1;
}
}
choice = ui__popup_menu(num_res, names, NULL);
for (i = 0; i < num_res; i++)
zfree(&names[i]);
free(names);
if (choice < 0 || choice >= num_res)
return -1;
r = &res_samples[choice];
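/* Build a perf script command covering context_len ns on each side of the sample. */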
n = timestamp__scnprintf_nsec(r->time - context_len, trange, sizeof trange);
trange[n++] = ',';
timestamp__scnprintf_nsec(r->time + context_len, trange + n, sizeof trange - n);
timestamp__scnprintf_nsec(r->time, tsample, sizeof tsample);
attr_to_script(extra_format, &evsel->core.attr);
if (asprintf(&cmd, "%s script %s%s --time %s %s%s %s%s --ns %s %s %s %s %s | less +/%s",
perf,
input_name ? "-i " : "",
input_name ? input_name : "",
trange,
r->cpu >= 0 ? "--cpu " : "",
r->cpu >= 0 ? (sprintf(cpubuf, "%d", r->cpu), cpubuf) : "",
r->tid ? "--tid " : "",
r->tid ? (sprintf(tidbuf, "%d", r->tid), tidbuf) : "",
extra_format,
rstype == A_ASM ? "-F +insn --xed" :
rstype == A_SOURCE ? "-F +srcline,+srccode" : "",
symbol_conf.inline_name ? "--inline" : "",
"--show-lost-events ",
r->tid ? "--show-switch-events --show-task-events " : "",
tsample) < 0)
return -1;
run_script(cmd);
free(cmd);
return 0;
}
| linux-master | tools/perf/ui/browsers/res_sample.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <linux/string.h>
#include "../../util/callchain.h"
#include "../../util/debug.h"
#include "../../util/event.h"
#include "../../util/hist.h"
#include "../../util/map.h"
#include "../../util/maps.h"
#include "../../util/symbol.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
#include "../../util/block-info.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
int i;
int ret = fprintf(fp, " ");
for (i = 0; i < left_margin; i++)
ret += fprintf(fp, " ");
return ret;
}
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
int left_margin)
{
int i;
size_t ret = callchain__fprintf_left_margin(fp, left_margin);
for (i = 0; i < depth; i++)
if (depth_mask & (1 << i))
ret += fprintf(fp, "| ");
else
ret += fprintf(fp, " ");
ret += fprintf(fp, "\n");
return ret;
}
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
struct callchain_list *chain,
int depth, int depth_mask, int period,
u64 total_samples, int left_margin)
{
int i;
size_t ret = 0;
char bf[1024], *alloc_str = NULL;
char buf[64];
const char *str;
ret += callchain__fprintf_left_margin(fp, left_margin);
for (i = 0; i < depth; i++) {
if (depth_mask & (1 << i))
ret += fprintf(fp, "|");
else
ret += fprintf(fp, " ");
if (!period && i == depth - 1) {
ret += fprintf(fp, "--");
ret += callchain_node__fprintf_value(node, fp, total_samples);
ret += fprintf(fp, "--");
} else
ret += fprintf(fp, "%s", " ");
}
str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
if (symbol_conf.show_branchflag_count) {
callchain_list_counts__printf_value(chain, NULL,
buf, sizeof(buf));
if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
str = "Not enough memory!";
else
str = alloc_str;
}
fputs(str, fp);
fputc('\n', fp);
free(alloc_str);
return ret;
}
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;
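/* Placeholder "[...]" entry used to account for remaining (filtered) hits. */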
static void init_rem_hits(void)
{
rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
if (!rem_sq_bracket) {
fprintf(stderr, "Not enough memory to display remaining hits\n");
return;
}
strcpy(rem_sq_bracket->name, "[...]");
rem_hits.ms.sym = rem_sq_bracket;
}
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
u64 total_samples, int depth,
int depth_mask, int left_margin)
{
struct rb_node *node, *next;
struct callchain_node *child = NULL;
struct callchain_list *chain;
int new_depth_mask = depth_mask;
u64 remaining;
size_t ret = 0;
int i;
uint entries_printed = 0;
int cumul_count = 0;
remaining = total_samples;
node = rb_first(root);
while (node) {
u64 new_total;
u64 cumul;
child = rb_entry(node, struct callchain_node, rb_node);
cumul = callchain_cumul_hits(child);
remaining -= cumul;
cumul_count += callchain_cumul_counts(child);
/*
 * The depth mask manages the output of pipes that show
 * the depth. We don't want to keep the pipes of the current
 * level for the last child of this depth, except if we have
 * remaining filtered hits, which will supersede the last child.
 */
next = rb_next(node);
if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
new_depth_mask &= ~(1 << (depth - 1));
/*
 * But we keep the old depth mask for the line separator,
 * to keep the level link until we reach the last child.
 */
ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
left_margin);
i = 0;
list_for_each_entry(chain, &child->val, list) {
ret += ipchain__fprintf_graph(fp, child, chain, depth,
new_depth_mask, i++,
total_samples,
left_margin);
}
if (callchain_param.mode == CHAIN_GRAPH_REL)
new_total = child->children_hit;
else
new_total = total_samples;
ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
depth + 1,
new_depth_mask | (1 << depth),
left_margin);
node = next;
if (++entries_printed == callchain_param.print_limit)
break;
}
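/* Account for the remaining (filtered) hits with a "[...]" pseudo entry. */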
if (callchain_param.mode == CHAIN_GRAPH_REL &&
remaining && remaining != total_samples) {
struct callchain_node rem_node = {
.hit = remaining,
};
if (!rem_sq_bracket)
return ret;
if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
rem_node.count = child->parent->children_count - cumul_count;
if (rem_node.count <= 0)
return ret;
}
new_depth_mask &= ~(1 << (depth - 1));
ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
new_depth_mask, 0, total_samples,
left_margin);
}
return ret;
}
/*
 * If there is a single callchain root, don't bother printing
 * its percentage (100% in fractal mode, and the same percentage
 * as the hist in graph mode). This also avoids one level of column.
 *
 * However, when a percent limit is applied, it's possible that a single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
struct callchain_node *cnode;
if (rb_next(node))
return true;
cnode = rb_entry(node, struct callchain_node, rb_node);
return callchain_cumul_hits(cnode) != parent_samples;
}
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
u64 total_samples, u64 parent_samples,
int left_margin)
{
struct callchain_node *cnode;
struct callchain_list *chain;
u32 entries_printed = 0;
bool printed = false;
struct rb_node *node;
int i = 0;
int ret = 0;
char bf[1024];
node = rb_first(root);
if (node && !need_percent_display(node, parent_samples)) {
cnode = rb_entry(node, struct callchain_node, rb_node);
list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same as
			 * the symbol. No need to print it, otherwise it appears
			 * displayed twice.
			 */
if (!i++ && field_order == NULL &&
sort_order && strstarts(sort_order, "sym"))
continue;
if (!printed) {
ret += callchain__fprintf_left_margin(fp, left_margin);
ret += fprintf(fp, "|\n");
ret += callchain__fprintf_left_margin(fp, left_margin);
ret += fprintf(fp, "---");
left_margin += 3;
printed = true;
} else
ret += callchain__fprintf_left_margin(fp, left_margin);
ret += fprintf(fp, "%s",
callchain_list__sym_name(chain, bf,
sizeof(bf),
false));
if (symbol_conf.show_branchflag_count)
ret += callchain_list_counts__printf_value(
chain, fp, NULL, 0);
ret += fprintf(fp, "\n");
if (++entries_printed == callchain_param.print_limit)
break;
}
root = &cnode->rb_root;
}
if (callchain_param.mode == CHAIN_GRAPH_REL)
total_samples = parent_samples;
ret += __callchain__fprintf_graph(fp, root, total_samples,
1, 1, left_margin);
if (ret) {
/* do not add a blank line if it printed nothing */
ret += fprintf(fp, "\n");
}
return ret;
}
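/*
 * Flat mode: print the chain root-first by recursing on parents before
 * emitting this node's own entries.
 */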
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
u64 total_samples)
{
struct callchain_list *chain;
size_t ret = 0;
char bf[1024];
if (!node)
return 0;
ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
list_for_each_entry(chain, &node->val, list) {
if (chain->ip >= PERF_CONTEXT_MAX)
continue;
ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
bf, sizeof(bf), false));
}
return ret;
}
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
u64 total_samples)
{
size_t ret = 0;
u32 entries_printed = 0;
struct callchain_node *chain;
struct rb_node *rb_node = rb_first(tree);
while (rb_node) {
chain = rb_entry(rb_node, struct callchain_node, rb_node);
ret += fprintf(fp, " ");
ret += callchain_node__fprintf_value(chain, fp, total_samples);
ret += fprintf(fp, "\n");
ret += __callchain__fprintf_flat(fp, chain, total_samples);
ret += fprintf(fp, "\n");
if (++entries_printed == callchain_param.print_limit)
break;
rb_node = rb_next(rb_node);
}
return ret;
}
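/*
 * Folded mode: emit the whole chain on one line, entries joined by
 * field_sep (';' by default), a format suitable for flame graph style
 * tooling.
 */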
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
const char *sep = symbol_conf.field_sep ?: ";";
struct callchain_list *chain;
size_t ret = 0;
char bf[1024];
bool first;
if (!node)
return 0;
ret += __callchain__fprintf_folded(fp, node->parent);
first = (ret == 0);
list_for_each_entry(chain, &node->val, list) {
if (chain->ip >= PERF_CONTEXT_MAX)
continue;
ret += fprintf(fp, "%s%s", first ? "" : sep,
callchain_list__sym_name(chain,
bf, sizeof(bf), false));
first = false;
}
return ret;
}
static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
u64 total_samples)
{
size_t ret = 0;
u32 entries_printed = 0;
struct callchain_node *chain;
struct rb_node *rb_node = rb_first(tree);
while (rb_node) {
chain = rb_entry(rb_node, struct callchain_node, rb_node);
ret += callchain_node__fprintf_value(chain, fp, total_samples);
ret += fprintf(fp, " ");
ret += __callchain__fprintf_folded(fp, chain);
ret += fprintf(fp, "\n");
if (++entries_printed == callchain_param.print_limit)
break;
rb_node = rb_next(rb_node);
}
return ret;
}
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
u64 total_samples, int left_margin,
FILE *fp)
{
u64 parent_samples = he->stat.period;
if (symbol_conf.cumulate_callchain)
parent_samples = he->stat_acc->period;
switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
case CHAIN_NONE:
break;
default:
pr_err("Bad callchain mode\n");
}
return 0;
}
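/*
 * Format one hist entry into hpp->buf using the given format list;
 * returns the number of bytes written.
 */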
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
struct perf_hpp_list *hpp_list)
{
const char *sep = symbol_conf.field_sep;
struct perf_hpp_fmt *fmt;
char *start = hpp->buf;
int ret;
bool first = true;
if (symbol_conf.exclude_other && !he->parent)
return 0;
perf_hpp_list__for_each_format(hpp_list, fmt) {
if (perf_hpp__should_skip(fmt, he->hists))
continue;
/*
* If there's no field_sep, we still need
* to display initial ' '.
*/
if (!sep || !first) {
ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
advance_hpp(hpp, ret);
} else
first = false;
if (perf_hpp__use_color() && fmt->color)
ret = fmt->color(fmt, hpp, he);
else
ret = fmt->entry(fmt, hpp, he);
ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
advance_hpp(hpp, ret);
}
return hpp->buf - start;
}
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
struct perf_hpp *hpp,
struct hists *hists,
FILE *fp)
{
const char *sep = symbol_conf.field_sep;
struct perf_hpp_fmt *fmt;
struct perf_hpp_list_node *fmt_node;
char *buf = hpp->buf;
size_t size = hpp->size;
int ret, printed = 0;
bool first = true;
if (symbol_conf.exclude_other && !he->parent)
return 0;
ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
advance_hpp(hpp, ret);
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&hists->hpp_formats,
struct perf_hpp_list_node, list);
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
/*
* If there's no field_sep, we still need
* to display initial ' '.
*/
if (!sep || !first) {
ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
advance_hpp(hpp, ret);
} else
first = false;
if (perf_hpp__use_color() && fmt->color)
ret = fmt->color(fmt, hpp, he);
else
ret = fmt->entry(fmt, hpp, he);
ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
advance_hpp(hpp, ret);
}
if (!sep)
ret = scnprintf(hpp->buf, hpp->size, "%*s",
(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
advance_hpp(hpp, ret);
printed += fprintf(fp, "%s", buf);
perf_hpp_list__for_each_format(he->hpp_list, fmt) {
hpp->buf = buf;
hpp->size = size;
/*
* No need to call hist_entry__snprintf_alignment() since this
* fmt is always the last column in the hierarchy mode.
*/
if (perf_hpp__use_color() && fmt->color)
fmt->color(fmt, hpp, he);
else
fmt->entry(fmt, hpp, he);
/*
* dynamic entries are right-aligned but we want left-aligned
* in the hierarchy mode
*/
printed += fprintf(fp, "%s%s", sep ?: " ", skip_spaces(buf));
}
printed += putc('\n', fp);
if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
u64 total = hists__total_period(hists);
printed += hist_entry_callchain__fprintf(he, total, 0, fp);
	}
return printed;
}
static int hist_entry__block_fprintf(struct hist_entry *he,
char *bf, size_t size,
FILE *fp)
{
struct block_hist *bh = container_of(he, struct block_hist, he);
int ret = 0;
for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
struct perf_hpp hpp = {
.buf = bf,
.size = size,
.skip = false,
};
bh->block_idx = i;
hist_entry__snprintf(he, &hpp);
if (!hpp.skip)
ret += fprintf(fp, "%s\n", bf);
}
return ret;
}
static int hist_entry__individual_block_fprintf(struct hist_entry *he,
char *bf, size_t size,
FILE *fp)
{
int ret = 0;
struct perf_hpp hpp = {
.buf = bf,
.size = size,
.skip = false,
};
hist_entry__snprintf(he, &hpp);
if (!hpp.skip)
ret += fprintf(fp, "%s\n", bf);
return ret;
}
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
char *bf, size_t bfsz, FILE *fp,
bool ignore_callchains)
{
int ret;
int callchain_ret = 0;
struct perf_hpp hpp = {
.buf = bf,
.size = size,
};
struct hists *hists = he->hists;
u64 total_period = hists->stats.total_period;
if (size == 0 || size > bfsz)
size = hpp.size = bfsz;
if (symbol_conf.report_hierarchy)
return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
if (symbol_conf.report_block)
return hist_entry__block_fprintf(he, bf, size, fp);
if (symbol_conf.report_individual_block)
return hist_entry__individual_block_fprintf(he, bf, size, fp);
hist_entry__snprintf(he, &hpp);
ret = fprintf(fp, "%s\n", bf);
if (hist_entry__has_callchains(he) && !ignore_callchains)
callchain_ret = hist_entry_callchain__fprintf(he, total_period,
0, fp);
ret += callchain_ret;
return ret;
}
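/*
 * Pad with (indent - 2) hierarchy levels of @line as filler; a no-op when
 * a field separator is in use or the indent is too small.
 */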
static int print_hierarchy_indent(const char *sep, int indent,
const char *line, FILE *fp)
{
int width;
if (sep != NULL || indent < 2)
return 0;
width = (indent - 2) * HIERARCHY_INDENT;
return fprintf(fp, "%-*.*s", width, width, line);
}
static int hists__fprintf_hierarchy_headers(struct hists *hists,
struct perf_hpp *hpp, FILE *fp)
{
bool first_node, first_col;
int indent;
int depth;
unsigned width = 0;
unsigned header_width = 0;
struct perf_hpp_fmt *fmt;
struct perf_hpp_list_node *fmt_node;
const char *sep = symbol_conf.field_sep;
indent = hists->nr_hpp_node;
/* preserve max indent depth for column headers */
print_hierarchy_indent(sep, indent, " ", fp);
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&hists->hpp_formats,
struct perf_hpp_list_node, list);
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
fmt->header(fmt, hpp, hists, 0, NULL);
fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
}
/* combine sort headers with ' / ' */
first_node = true;
list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
if (!first_node)
header_width += fprintf(fp, " / ");
first_node = false;
first_col = true;
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (perf_hpp__should_skip(fmt, hists))
continue;
if (!first_col)
header_width += fprintf(fp, "+");
first_col = false;
fmt->header(fmt, hpp, hists, 0, NULL);
header_width += fprintf(fp, "%s", strim(hpp->buf));
}
}
fprintf(fp, "\n# ");
/* preserve max indent depth for initial dots */
print_hierarchy_indent(sep, indent, dots, fp);
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&hists->hpp_formats,
struct perf_hpp_list_node, list);
first_col = true;
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (!first_col)
fprintf(fp, "%s", sep ?: "..");
first_col = false;
width = fmt->width(fmt, hpp, hists);
fprintf(fp, "%.*s", width, dots);
}
depth = 0;
list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
first_col = true;
width = depth * HIERARCHY_INDENT;
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (perf_hpp__should_skip(fmt, hists))
continue;
if (!first_col)
width++; /* for '+' sign between column header */
first_col = false;
width += fmt->width(fmt, hpp, hists);
}
if (width > header_width)
header_width = width;
depth++;
}
fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);
fprintf(fp, "\n#\n");
return 2;
}
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
int line, FILE *fp)
{
struct perf_hpp_fmt *fmt;
const char *sep = symbol_conf.field_sep;
bool first = true;
int span = 0;
hists__for_each_format(hists, fmt) {
if (perf_hpp__should_skip(fmt, hists))
continue;
if (!first && !span)
fprintf(fp, "%s", sep ?: " ");
else
first = false;
fmt->header(fmt, hpp, hists, line, &span);
if (!span)
fprintf(fp, "%s", hpp->buf);
}
}
static int
hists__fprintf_standard_headers(struct hists *hists,
struct perf_hpp *hpp,
FILE *fp)
{
struct perf_hpp_list *hpp_list = hists->hpp_list;
struct perf_hpp_fmt *fmt;
unsigned int width;
const char *sep = symbol_conf.field_sep;
bool first = true;
int line;
for (line = 0; line < hpp_list->nr_header_lines; line++) {
/* first # is displayed one level up */
if (line)
fprintf(fp, "# ");
fprintf_line(hists, hpp, line, fp);
fprintf(fp, "\n");
}
if (sep)
return hpp_list->nr_header_lines;
first = true;
fprintf(fp, "# ");
hists__for_each_format(hists, fmt) {
unsigned int i;
if (perf_hpp__should_skip(fmt, hists))
continue;
if (!first)
fprintf(fp, "%s", sep ?: " ");
else
first = false;
width = fmt->width(fmt, hpp, hists);
for (i = 0; i < width; i++)
fprintf(fp, ".");
}
fprintf(fp, "\n");
fprintf(fp, "#\n");
return hpp_list->nr_header_lines + 2;
}
int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
char bf[1024];
struct perf_hpp dummy_hpp = {
.buf = bf,
.size = sizeof(bf),
};
fprintf(fp, "# ");
if (symbol_conf.report_hierarchy)
return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
else
return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
}
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp,
bool ignore_callchains)
{
struct rb_node *nd;
size_t ret = 0;
const char *sep = symbol_conf.field_sep;
int nr_rows = 0;
size_t linesz;
char *line = NULL;
unsigned indent;
init_rem_hits();
hists__reset_column_width(hists);
if (symbol_conf.col_width_list_str)
perf_hpp__set_user_width(symbol_conf.col_width_list_str);
if (show_header)
nr_rows += hists__fprintf_headers(hists, fp);
if (max_rows && nr_rows >= max_rows)
goto out;
linesz = hists__sort_list_width(hists) + 3 + 1;
linesz += perf_hpp__color_overhead();
line = malloc(linesz);
if (line == NULL) {
ret = -1;
goto out;
}
indent = hists__overhead_width(hists) + 4;
for (nd = rb_first_cached(&hists->entries); nd;
nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent;
if (h->filtered)
continue;
if (symbol_conf.report_individual_block)
percent = block_info__total_cycles_percent(h);
else
percent = hist_entry__get_percent_limit(h);
if (percent < min_pcnt)
continue;
ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
if (max_rows && ++nr_rows >= max_rows)
break;
/*
* If all children are filtered out or percent-limited,
* display "no entry >= x.xx%" message.
*/
if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
int depth = hists->nr_hpp_node + h->depth + 1;
print_hierarchy_indent(sep, depth, " ", fp);
fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
if (max_rows && ++nr_rows >= max_rows)
break;
}
if (h->ms.map == NULL && verbose > 1) {
maps__fprintf(thread__maps(h->thread), fp);
fprintf(fp, "%.10s end\n", graph_dotted_line);
}
}
free(line);
out:
zfree(&rem_sq_bracket);
return ret;
}
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp,
bool skip_empty)
{
int i;
size_t ret = 0;
u32 total = stats->nr_events[0];
for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
const char *name;
name = perf_event__name(i);
if (!strcmp(name, "UNKNOWN"))
continue;
if (skip_empty && !stats->nr_events[i])
continue;
if (i && total) {
ret += fprintf(fp, "%16s events: %10d (%4.1f%%)\n",
name, stats->nr_events[i],
100.0 * stats->nr_events[i] / total);
} else {
ret += fprintf(fp, "%16s events: %10d\n",
name, stats->nr_events[i]);
}
}
return ret;
}
| linux-master | tools/perf/ui/stdio/hist.c |
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
#include "../evsel.h"
#include "../sort.h"
#include "../hist.h"
#include "../helpline.h"
#include <signal.h>
void perf_gtk__signal(int sig)
{
perf_gtk__exit(false);
psignal(sig, "perf");
}
void perf_gtk__resize_window(GtkWidget *window)
{
GdkRectangle rect;
GdkScreen *screen;
int monitor;
int height;
int width;
screen = gtk_widget_get_screen(window);
monitor = gdk_screen_get_monitor_at_window(screen, window->window);
gdk_screen_get_monitor_geometry(screen, monitor, &rect);
width = rect.width * 3 / 4;
height = rect.height * 3 / 4;
gtk_window_resize(GTK_WINDOW(window), width, height);
}
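/*
 * Map a percentage to Pango color markup: red above MIN_RED, dark green
 * above MIN_GREEN, NULL (no markup) otherwise.
 */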
const char *perf_gtk__get_percent_color(double percent)
{
if (percent >= MIN_RED)
return "<span fgcolor='red'>";
if (percent >= MIN_GREEN)
return "<span fgcolor='dark green'>";
return NULL;
}
#ifdef HAVE_GTK_INFO_BAR_SUPPORT
GtkWidget *perf_gtk__setup_info_bar(void)
{
GtkWidget *info_bar;
GtkWidget *label;
GtkWidget *content_area;
info_bar = gtk_info_bar_new();
gtk_widget_set_no_show_all(info_bar, TRUE);
label = gtk_label_new("");
gtk_widget_show(label);
content_area = gtk_info_bar_get_content_area(GTK_INFO_BAR(info_bar));
gtk_container_add(GTK_CONTAINER(content_area), label);
gtk_info_bar_add_button(GTK_INFO_BAR(info_bar), GTK_STOCK_OK,
GTK_RESPONSE_OK);
g_signal_connect(info_bar, "response",
G_CALLBACK(gtk_widget_hide), NULL);
pgctx->info_bar = info_bar;
pgctx->message_label = label;
return info_bar;
}
#endif
GtkWidget *perf_gtk__setup_statusbar(void)
{
GtkWidget *stbar;
unsigned ctxid;
stbar = gtk_statusbar_new();
ctxid = gtk_statusbar_get_context_id(GTK_STATUSBAR(stbar),
"perf report");
pgctx->statbar = stbar;
pgctx->statbar_ctx_id = ctxid;
return stbar;
}
| linux-master | tools/perf/ui/gtk/browser.c |
// SPDX-License-Identifier: GPL-2.0
#include "../util.h"
#include "gtk.h"
#include <stdlib.h>
#include <string.h>
#include <linux/zalloc.h>
struct perf_gtk_context *pgctx;
struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window)
{
struct perf_gtk_context *ctx;
	ctx = malloc(sizeof(*ctx));
if (ctx)
ctx->main_window = window;
return ctx;
}
int perf_gtk__deactivate_context(struct perf_gtk_context **ctx)
{
if (!perf_gtk__is_active_context(*ctx))
return -1;
zfree(ctx);
return 0;
}
static int perf_gtk__error(const char *format, va_list args)
{
char *msg;
GtkWidget *dialog;
if (!perf_gtk__is_active_context(pgctx) ||
vasprintf(&msg, format, args) < 0) {
fprintf(stderr, "Error:\n");
vfprintf(stderr, format, args);
fprintf(stderr, "\n");
return -1;
}
dialog = gtk_message_dialog_new_with_markup(GTK_WINDOW(pgctx->main_window),
GTK_DIALOG_DESTROY_WITH_PARENT,
GTK_MESSAGE_ERROR,
GTK_BUTTONS_CLOSE,
"<b>Error</b>\n\n%s", msg);
gtk_dialog_run(GTK_DIALOG(dialog));
gtk_widget_destroy(dialog);
free(msg);
return 0;
}
#ifdef HAVE_GTK_INFO_BAR_SUPPORT
static int perf_gtk__warning_info_bar(const char *format, va_list args)
{
char *msg;
if (!perf_gtk__is_active_context(pgctx) ||
vasprintf(&msg, format, args) < 0) {
fprintf(stderr, "Warning:\n");
vfprintf(stderr, format, args);
fprintf(stderr, "\n");
return -1;
}
gtk_label_set_text(GTK_LABEL(pgctx->message_label), msg);
gtk_info_bar_set_message_type(GTK_INFO_BAR(pgctx->info_bar),
GTK_MESSAGE_WARNING);
gtk_widget_show(pgctx->info_bar);
free(msg);
return 0;
}
#else
static int perf_gtk__warning_statusbar(const char *format, va_list args)
{
char *msg, *p;
if (!perf_gtk__is_active_context(pgctx) ||
vasprintf(&msg, format, args) < 0) {
fprintf(stderr, "Warning:\n");
vfprintf(stderr, format, args);
fprintf(stderr, "\n");
return -1;
}
gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar),
pgctx->statbar_ctx_id);
/* Only first line can be displayed */
p = strchr(msg, '\n');
if (p)
*p = '\0';
gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar),
pgctx->statbar_ctx_id, msg);
free(msg);
return 0;
}
#endif
struct perf_error_ops perf_gtk_eops = {
.error = perf_gtk__error,
#ifdef HAVE_GTK_INFO_BAR_SUPPORT
.warning = perf_gtk__warning_info_bar,
#else
.warning = perf_gtk__warning_statusbar,
#endif
};
| linux-master | tools/perf/ui/gtk/util.c |
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include "../ui.h"
#include "../helpline.h"
static void gtk_helpline_pop(void)
{
if (!perf_gtk__is_active_context(pgctx))
return;
gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar),
pgctx->statbar_ctx_id);
}
static void gtk_helpline_push(const char *msg)
{
if (!perf_gtk__is_active_context(pgctx))
return;
gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar),
pgctx->statbar_ctx_id, msg);
}
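/*
 * Accumulate partial messages in ui_helpline__current until a newline
 * arrives, then push the completed first line to the statusbar.
 */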
static int gtk_helpline_show(const char *fmt, va_list ap)
{
int ret;
char *ptr;
static int backlog;
ret = vscnprintf(ui_helpline__current + backlog,
sizeof(ui_helpline__current) - backlog, fmt, ap);
backlog += ret;
/* only first line can be displayed */
ptr = strchr(ui_helpline__current, '\n');
if (ptr && (ptr - ui_helpline__current) <= backlog) {
*ptr = '\0';
ui_helpline__puts(ui_helpline__current);
backlog = 0;
}
return ret;
}
static struct ui_helpline gtk_helpline_fns = {
.pop = gtk_helpline_pop,
.push = gtk_helpline_push,
.show = gtk_helpline_show,
};
void perf_gtk__init_helpline(void)
{
	helpline_fns = &gtk_helpline_fns;
}
| linux-master | tools/perf/ui/gtk/helpline.c |
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
#include <linux/compiler.h>
#include "../util.h"
extern struct perf_error_ops perf_gtk_eops;
int perf_gtk__init(void)
{
perf_error__register(&perf_gtk_eops);
perf_gtk__init_helpline();
gtk_ui_progress__init();
perf_gtk__init_hpp();
return gtk_init_check(NULL, NULL) ? 0 : -1;
}
void perf_gtk__exit(bool wait_for_ok __maybe_unused)
{
if (!perf_gtk__is_active_context(pgctx))
return;
perf_error__unregister(&perf_gtk_eops);
gtk_main_quit();
}
| linux-master | tools/perf/ui/gtk/setup.c |
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
#include "util/sort.h"
#include "util/debug.h"
#include "util/annotate.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/dso.h"
#include "util/symbol.h"
#include "ui/helpline.h"
#include <inttypes.h>
#include <signal.h>
enum {
ANN_COL__PERCENT,
ANN_COL__OFFSET,
ANN_COL__LINE,
MAX_ANN_COLS
};
static const char *const col_names[] = {
"Overhead",
"Offset",
"Line"
};
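/*
 * Render the sample percentage for one disassembly line into @buf,
 * wrapped in Pango color markup when it crosses the red/green thresholds.
 */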
static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
struct disasm_line *dl, int evidx)
{
struct sym_hist *symhist;
double percent = 0.0;
const char *markup;
int ret = 0;
strcpy(buf, "");
if (dl->al.offset == (s64) -1)
return 0;
symhist = annotation__histogram(symbol__annotation(sym), evidx);
if (!symbol_conf.event_group && !symhist->addr[dl->al.offset].nr_samples)
return 0;
percent = 100.0 * symhist->addr[dl->al.offset].nr_samples / symhist->nr_samples;
markup = perf_gtk__get_percent_color(percent);
if (markup)
ret += scnprintf(buf, size, "%s", markup);
ret += scnprintf(buf + ret, size - ret, "%6.2f%%", percent);
if (markup)
ret += scnprintf(buf + ret, size - ret, "</span>");
return ret;
}
static int perf_gtk__get_offset(char *buf, size_t size, struct map_symbol *ms,
struct disasm_line *dl)
{
u64 start = map__rip_2objdump(ms->map, ms->sym->start);
strcpy(buf, "");
if (dl->al.offset == (s64) -1)
return 0;
return scnprintf(buf, size, "%"PRIx64, start + dl->al.offset);
}
static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
{
int ret = 0;
char *line = g_markup_escape_text(dl->al.line, -1);
const char *markup = "<span fgcolor='gray'>";
strcpy(buf, "");
if (!line)
return 0;
if (dl->al.offset != (s64) -1)
markup = NULL;
if (markup)
ret += scnprintf(buf, size, "%s", markup);
ret += scnprintf(buf + ret, size - ret, "%s", line);
if (markup)
ret += scnprintf(buf + ret, size - ret, "</span>");
g_free(line);
return ret;
}
static int perf_gtk__annotate_symbol(GtkWidget *window, struct map_symbol *ms,
struct evsel *evsel,
struct hist_browser_timer *hbt __maybe_unused)
{
struct symbol *sym = ms->sym;
struct disasm_line *pos, *n;
struct annotation *notes;
GType col_types[MAX_ANN_COLS];
GtkCellRenderer *renderer;
GtkListStore *store;
GtkWidget *view;
int i;
char s[512];
notes = symbol__annotation(sym);
for (i = 0; i < MAX_ANN_COLS; i++) {
col_types[i] = G_TYPE_STRING;
}
store = gtk_list_store_newv(MAX_ANN_COLS, col_types);
view = gtk_tree_view_new();
renderer = gtk_cell_renderer_text_new();
for (i = 0; i < MAX_ANN_COLS; i++) {
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-1, col_names[i], renderer, "markup",
i, NULL);
}
gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
g_object_unref(GTK_TREE_MODEL(store));
	list_for_each_entry(pos, &notes->src->source, al.node) {
GtkTreeIter iter;
int ret = 0;
gtk_list_store_append(store, &iter);
if (evsel__is_group_event(evsel)) {
for (i = 0; i < evsel->core.nr_members; i++) {
ret += perf_gtk__get_percent(s + ret,
sizeof(s) - ret,
sym, pos,
evsel->core.idx + i);
ret += scnprintf(s + ret, sizeof(s) - ret, " ");
}
} else {
ret = perf_gtk__get_percent(s, sizeof(s), sym, pos,
evsel->core.idx);
}
if (ret)
gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1);
if (perf_gtk__get_offset(s, sizeof(s), ms, pos))
gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1);
if (perf_gtk__get_line(s, sizeof(s), pos))
gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1);
}
gtk_container_add(GTK_CONTAINER(window), view);
	list_for_each_entry_safe(pos, n, &notes->src->source, al.node) {
list_del_init(&pos->al.node);
disasm_line__free(pos);
}
return 0;
}
static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *options,
struct hist_browser_timer *hbt)
{
struct dso *dso = map__dso(ms->map);
struct symbol *sym = ms->sym;
GtkWidget *window;
GtkWidget *notebook;
GtkWidget *scrolled_window;
GtkWidget *tab_label;
int err;
if (dso->annotate_warned)
return -1;
err = symbol__annotate(ms, evsel, options, NULL);
if (err) {
char msg[BUFSIZ];
dso->annotate_warned = true;
symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
ui__error("Couldn't annotate %s: %s\n", sym->name, msg);
return -1;
}
symbol__calc_percent(sym, evsel);
if (perf_gtk__is_active_context(pgctx)) {
window = pgctx->main_window;
notebook = pgctx->notebook;
} else {
GtkWidget *vbox;
GtkWidget *infobar;
GtkWidget *statbar;
signal(SIGSEGV, perf_gtk__signal);
signal(SIGFPE, perf_gtk__signal);
signal(SIGINT, perf_gtk__signal);
signal(SIGQUIT, perf_gtk__signal);
signal(SIGTERM, perf_gtk__signal);
window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
gtk_window_set_title(GTK_WINDOW(window), "perf annotate");
g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
pgctx = perf_gtk__activate_context(window);
if (!pgctx)
return -1;
vbox = gtk_vbox_new(FALSE, 0);
notebook = gtk_notebook_new();
pgctx->notebook = notebook;
gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
infobar = perf_gtk__setup_info_bar();
if (infobar) {
gtk_box_pack_start(GTK_BOX(vbox), infobar,
FALSE, FALSE, 0);
}
statbar = perf_gtk__setup_statusbar();
gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);
gtk_container_add(GTK_CONTAINER(window), vbox);
}
scrolled_window = gtk_scrolled_window_new(NULL, NULL);
tab_label = gtk_label_new(sym->name);
gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
GTK_POLICY_AUTOMATIC,
GTK_POLICY_AUTOMATIC);
gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window,
tab_label);
perf_gtk__annotate_symbol(scrolled_window, ms, evsel, hbt);
return 0;
}
int hist_entry__gtk_annotate(struct hist_entry *he,
struct evsel *evsel,
struct annotation_options *options,
struct hist_browser_timer *hbt)
{
return symbol__gtk_annotate(&he->ms, evsel, options, hbt);
}
void perf_gtk__show_annotations(void)
{
GtkWidget *window;
if (!perf_gtk__is_active_context(pgctx))
return;
window = pgctx->main_window;
gtk_widget_show_all(window);
perf_gtk__resize_window(window);
gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
gtk_main();
perf_gtk__deactivate_context(&pgctx);
}
| linux-master | tools/perf/ui/gtk/annotate.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include "gtk.h"
#include "../progress.h"
static GtkWidget *dialog;
static GtkWidget *progress;
static void gtk_ui_progress__update(struct ui_progress *p)
{
double fraction = p->total ? 1.0 * p->curr / p->total : 0.0;
char buf[1024];
if (dialog == NULL) {
GtkWidget *vbox = gtk_vbox_new(TRUE, 5);
GtkWidget *label = gtk_label_new(p->title);
dialog = gtk_window_new(GTK_WINDOW_TOPLEVEL);
progress = gtk_progress_bar_new();
gtk_box_pack_start(GTK_BOX(vbox), label, TRUE, FALSE, 3);
gtk_box_pack_start(GTK_BOX(vbox), progress, TRUE, TRUE, 3);
gtk_container_add(GTK_CONTAINER(dialog), vbox);
gtk_window_set_title(GTK_WINDOW(dialog), "perf");
gtk_window_resize(GTK_WINDOW(dialog), 300, 80);
gtk_window_set_position(GTK_WINDOW(dialog), GTK_WIN_POS_CENTER);
gtk_widget_show_all(dialog);
}
gtk_progress_bar_set_fraction(GTK_PROGRESS_BAR(progress), fraction);
snprintf(buf, sizeof(buf), "%"PRIu64" / %"PRIu64, p->curr, p->total);
gtk_progress_bar_set_text(GTK_PROGRESS_BAR(progress), buf);
/* we didn't call gtk_main yet, so do it manually */
while (gtk_events_pending())
gtk_main_iteration();
}
static void gtk_ui_progress__finish(void)
{
/* this will also destroy all of its children */
gtk_widget_destroy(dialog);
dialog = NULL;
}
static struct ui_progress_ops gtk_ui_progress__ops = {
.update = gtk_ui_progress__update,
.finish = gtk_ui_progress__finish,
};
void gtk_ui_progress__init(void)
{
	ui_progress__ops = &gtk_ui_progress__ops;
}
| linux-master | tools/perf/ui/gtk/progress.c |
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
#include "../evlist.h"
#include "../callchain.h"
#include "../evsel.h"
#include "../sort.h"
#include "../hist.h"
#include "../helpline.h"
#include "../string2.h"
#include <signal.h>
#include <stdlib.h>
#include <linux/string.h>
#define MAX_COLUMNS 32
static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
int ret = 0;
int len;
va_list args;
double percent;
const char *markup;
char *buf = hpp->buf;
size_t size = hpp->size;
va_start(args, fmt);
len = va_arg(args, int);
percent = va_arg(args, double);
va_end(args);
markup = perf_gtk__get_percent_color(percent);
if (markup)
		ret += scnprintf(buf, size, "%s", markup);
ret += scnprintf(buf + ret, size - ret, fmt, len, percent);
if (markup)
ret += scnprintf(buf + ret, size - ret, "</span>");
return ret;
}
#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
return he->stat._field; \
} \
\
static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
__percent_color_snprintf, true); \
}
#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
static u64 he_get_acc_##_field(struct hist_entry *he) \
{ \
return he->stat_acc->_field; \
} \
\
static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
__percent_color_snprintf, true); \
}
__HPP_COLOR_PERCENT_FN(overhead, period)
__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys)
__HPP_COLOR_PERCENT_FN(overhead_us, period_us)
__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys)
__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us)
__HPP_COLOR_ACC_PERCENT_FN(overhead_acc, period)
#undef __HPP_COLOR_PERCENT_FN
void perf_gtk__init_hpp(void)
{
perf_hpp__format[PERF_HPP__OVERHEAD].color =
perf_gtk__hpp_color_overhead;
perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
perf_gtk__hpp_color_overhead_sys;
perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
perf_gtk__hpp_color_overhead_us;
perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
perf_gtk__hpp_color_overhead_guest_sys;
perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
perf_gtk__hpp_color_overhead_guest_us;
perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color =
perf_gtk__hpp_color_overhead_acc;
}
static void perf_gtk__add_callchain_flat(struct rb_root *root, GtkTreeStore *store,
GtkTreeIter *parent, int col, u64 total)
{
struct rb_node *nd;
bool has_single_node = (rb_first(root) == rb_last(root));
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct callchain_node *node;
struct callchain_list *chain;
GtkTreeIter iter, new_parent;
bool need_new_parent;
node = rb_entry(nd, struct callchain_node, rb_node);
new_parent = *parent;
need_new_parent = !has_single_node;
callchain_node__make_parent_list(node);
list_for_each_entry(chain, &node->parent_val, list) {
char buf[128];
gtk_tree_store_append(store, &iter, &new_parent);
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
gtk_tree_store_set(store, &iter, 0, buf, -1);
callchain_list__sym_name(chain, buf, sizeof(buf), false);
gtk_tree_store_set(store, &iter, col, buf, -1);
if (need_new_parent) {
/*
* Only show the top-most symbol in a callchain
* if it's not the only callchain.
*/
new_parent = iter;
need_new_parent = false;
}
}
list_for_each_entry(chain, &node->val, list) {
char buf[128];
gtk_tree_store_append(store, &iter, &new_parent);
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
gtk_tree_store_set(store, &iter, 0, buf, -1);
callchain_list__sym_name(chain, buf, sizeof(buf), false);
gtk_tree_store_set(store, &iter, col, buf, -1);
if (need_new_parent) {
/*
* Only show the top-most symbol in a callchain
* if it's not the only callchain.
*/
new_parent = iter;
need_new_parent = false;
}
}
}
}
static void perf_gtk__add_callchain_folded(struct rb_root *root, GtkTreeStore *store,
GtkTreeIter *parent, int col, u64 total)
{
struct rb_node *nd;
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct callchain_node *node;
struct callchain_list *chain;
GtkTreeIter iter;
char buf[64];
char *str, *str_alloc = NULL;
bool first = true;
node = rb_entry(nd, struct callchain_node, rb_node);
callchain_node__make_parent_list(node);
list_for_each_entry(chain, &node->parent_val, list) {
char name[1024];
callchain_list__sym_name(chain, name, sizeof(name), false);
if (asprintf(&str, "%s%s%s",
first ? "" : str_alloc,
first ? "" : symbol_conf.field_sep ?: "; ",
name) < 0)
return;
first = false;
free(str_alloc);
str_alloc = str;
}
list_for_each_entry(chain, &node->val, list) {
char name[1024];
callchain_list__sym_name(chain, name, sizeof(name), false);
if (asprintf(&str, "%s%s%s",
first ? "" : str_alloc,
first ? "" : symbol_conf.field_sep ?: "; ",
name) < 0)
return;
first = false;
free(str_alloc);
str_alloc = str;
}
gtk_tree_store_append(store, &iter, parent);
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
gtk_tree_store_set(store, &iter, 0, buf, -1);
gtk_tree_store_set(store, &iter, col, str, -1);
free(str_alloc);
}
}
static void perf_gtk__add_callchain_graph(struct rb_root *root, GtkTreeStore *store,
GtkTreeIter *parent, int col, u64 total)
{
struct rb_node *nd;
bool has_single_node = (rb_first(root) == rb_last(root));
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct callchain_node *node;
struct callchain_list *chain;
GtkTreeIter iter, new_parent;
bool need_new_parent;
u64 child_total;
node = rb_entry(nd, struct callchain_node, rb_node);
new_parent = *parent;
need_new_parent = !has_single_node && (node->val_nr > 1);
list_for_each_entry(chain, &node->val, list) {
char buf[128];
gtk_tree_store_append(store, &iter, &new_parent);
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
gtk_tree_store_set(store, &iter, 0, buf, -1);
callchain_list__sym_name(chain, buf, sizeof(buf), false);
gtk_tree_store_set(store, &iter, col, buf, -1);
if (need_new_parent) {
/*
* Only show the top-most symbol in a callchain
* if it's not the only callchain.
*/
new_parent = iter;
need_new_parent = false;
}
}
if (callchain_param.mode == CHAIN_GRAPH_REL)
child_total = node->children_hit;
else
child_total = total;
/* Now 'iter' contains info of the last callchain_list */
perf_gtk__add_callchain_graph(&node->rb_root, store, &iter, col,
child_total);
}
}
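/* Dispatch on the configured callchain mode: flat, folded or graph. */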
static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
GtkTreeIter *parent, int col, u64 total)
{
if (callchain_param.mode == CHAIN_FLAT)
perf_gtk__add_callchain_flat(root, store, parent, col, total);
else if (callchain_param.mode == CHAIN_FOLDED)
perf_gtk__add_callchain_folded(root, store, parent, col, total);
else
perf_gtk__add_callchain_graph(root, store, parent, col, total);
}
static void on_row_activated(GtkTreeView *view, GtkTreePath *path,
GtkTreeViewColumn *col __maybe_unused,
gpointer user_data __maybe_unused)
{
bool expanded = gtk_tree_view_row_expanded(view, path);
if (expanded)
gtk_tree_view_collapse_row(view, path);
else
gtk_tree_view_expand_row(view, path, FALSE);
}
static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
float min_pcnt)
{
struct perf_hpp_fmt *fmt;
GType col_types[MAX_COLUMNS];
GtkCellRenderer *renderer;
GtkTreeStore *store;
struct rb_node *nd;
GtkWidget *view;
int col_idx;
int sym_col = -1;
int nr_cols;
char s[512];
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
};
nr_cols = 0;
hists__for_each_format(hists, fmt)
col_types[nr_cols++] = G_TYPE_STRING;
store = gtk_tree_store_newv(nr_cols, col_types);
view = gtk_tree_view_new();
renderer = gtk_cell_renderer_text_new();
col_idx = 0;
hists__for_each_format(hists, fmt) {
if (perf_hpp__should_skip(fmt, hists))
continue;
		/*
		 * XXX: no way to determine where the symbol column is,
		 * so just use the last column for now.
		 */
if (perf_hpp__is_sort_entry(fmt))
sym_col = col_idx;
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-1, fmt->name,
renderer, "markup",
col_idx++, NULL);
}
for (col_idx = 0; col_idx < nr_cols; col_idx++) {
GtkTreeViewColumn *column;
column = gtk_tree_view_get_column(GTK_TREE_VIEW(view), col_idx);
gtk_tree_view_column_set_resizable(column, TRUE);
if (col_idx == sym_col) {
gtk_tree_view_set_expander_column(GTK_TREE_VIEW(view),
column);
}
}
gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
g_object_unref(GTK_TREE_MODEL(store));
for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
GtkTreeIter iter;
u64 total = hists__total_period(h->hists);
float percent;
if (h->filtered)
continue;
percent = hist_entry__get_percent_limit(h);
if (percent < min_pcnt)
continue;
gtk_tree_store_append(store, &iter, NULL);
col_idx = 0;
hists__for_each_format(hists, fmt) {
if (perf_hpp__should_skip(fmt, h->hists))
continue;
if (fmt->color)
fmt->color(fmt, &hpp, h);
else
fmt->entry(fmt, &hpp, h);
gtk_tree_store_set(store, &iter, col_idx++, s, -1);
}
if (hist_entry__has_callchains(h) &&
symbol_conf.use_callchain && hists__has(hists, sym)) {
if (callchain_param.mode == CHAIN_GRAPH_REL)
total = symbol_conf.cumulate_callchain ?
h->stat_acc->period : h->stat.period;
perf_gtk__add_callchain(&h->sorted_chain, store, &iter,
sym_col, total);
}
}
gtk_tree_view_set_rules_hint(GTK_TREE_VIEW(view), TRUE);
g_signal_connect(view, "row-activated",
G_CALLBACK(on_row_activated), NULL);
gtk_container_add(GTK_CONTAINER(window), view);
}
static void perf_gtk__add_hierarchy_entries(struct hists *hists,
struct rb_root_cached *root,
GtkTreeStore *store,
GtkTreeIter *parent,
struct perf_hpp *hpp,
float min_pcnt)
{
int col_idx = 0;
struct rb_node *node;
struct hist_entry *he;
struct perf_hpp_fmt *fmt;
struct perf_hpp_list_node *fmt_node;
u64 total = hists__total_period(hists);
int size;
for (node = rb_first_cached(root); node; node = rb_next(node)) {
GtkTreeIter iter;
float percent;
char *bf;
he = rb_entry(node, struct hist_entry, rb_node);
if (he->filtered)
continue;
percent = hist_entry__get_percent_limit(he);
if (percent < min_pcnt)
continue;
gtk_tree_store_append(store, &iter, parent);
col_idx = 0;
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&hists->hpp_formats,
struct perf_hpp_list_node, list);
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (fmt->color)
fmt->color(fmt, hpp, he);
else
fmt->entry(fmt, hpp, he);
gtk_tree_store_set(store, &iter, col_idx++, hpp->buf, -1);
}
bf = hpp->buf;
size = hpp->size;
perf_hpp_list__for_each_format(he->hpp_list, fmt) {
int ret;
if (fmt->color)
ret = fmt->color(fmt, hpp, he);
else
ret = fmt->entry(fmt, hpp, he);
snprintf(hpp->buf + ret, hpp->size - ret, " ");
advance_hpp(hpp, ret + 2);
}
gtk_tree_store_set(store, &iter, col_idx, strim(bf), -1);
if (!he->leaf) {
hpp->buf = bf;
hpp->size = size;
perf_gtk__add_hierarchy_entries(hists, &he->hroot_out,
store, &iter, hpp,
min_pcnt);
if (!hist_entry__has_hierarchy_children(he, min_pcnt)) {
char buf[32];
GtkTreeIter child;
snprintf(buf, sizeof(buf), "no entry >= %.2f%%",
min_pcnt);
gtk_tree_store_append(store, &child, &iter);
gtk_tree_store_set(store, &child, col_idx, buf, -1);
}
}
if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
if (callchain_param.mode == CHAIN_GRAPH_REL)
total = symbol_conf.cumulate_callchain ?
he->stat_acc->period : he->stat.period;
perf_gtk__add_callchain(&he->sorted_chain, store, &iter,
col_idx, total);
}
}
}
static void perf_gtk__show_hierarchy(GtkWidget *window, struct hists *hists,
float min_pcnt)
{
struct perf_hpp_fmt *fmt;
struct perf_hpp_list_node *fmt_node;
GType col_types[MAX_COLUMNS];
GtkCellRenderer *renderer;
GtkTreeStore *store;
GtkWidget *view;
int col_idx;
int nr_cols = 0;
char s[512];
char buf[512];
bool first_node, first_col;
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
};
hists__for_each_format(hists, fmt) {
if (perf_hpp__is_sort_entry(fmt) ||
perf_hpp__is_dynamic_entry(fmt))
break;
col_types[nr_cols++] = G_TYPE_STRING;
}
col_types[nr_cols++] = G_TYPE_STRING;
store = gtk_tree_store_newv(nr_cols, col_types);
view = gtk_tree_view_new();
renderer = gtk_cell_renderer_text_new();
col_idx = 0;
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&hists->hpp_formats,
struct perf_hpp_list_node, list);
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-1, fmt->name,
renderer, "markup",
col_idx++, NULL);
}
/* construct merged column header since sort keys share single column */
buf[0] = '\0';
first_node = true;
list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
if (!first_node)
strcat(buf, " / ");
first_node = false;
first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (perf_hpp__should_skip(fmt, hists))
continue;
if (!first_col)
strcat(buf, "+");
first_col = false;
fmt->header(fmt, &hpp, hists, 0, NULL);
strcat(buf, strim(hpp.buf));
}
}
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-1, buf,
renderer, "markup",
col_idx++, NULL);
for (col_idx = 0; col_idx < nr_cols; col_idx++) {
GtkTreeViewColumn *column;
column = gtk_tree_view_get_column(GTK_TREE_VIEW(view), col_idx);
gtk_tree_view_column_set_resizable(column, TRUE);
if (col_idx == 0) {
gtk_tree_view_set_expander_column(GTK_TREE_VIEW(view),
column);
}
}
gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
g_object_unref(GTK_TREE_MODEL(store));
perf_gtk__add_hierarchy_entries(hists, &hists->entries, store,
NULL, &hpp, min_pcnt);
gtk_tree_view_set_rules_hint(GTK_TREE_VIEW(view), TRUE);
g_signal_connect(view, "row-activated",
G_CALLBACK(on_row_activated), NULL);
gtk_container_add(GTK_CONTAINER(window), view);
}
int evlist__gtk_browse_hists(struct evlist *evlist, const char *help,
struct hist_browser_timer *hbt __maybe_unused, float min_pcnt)
{
struct evsel *pos;
GtkWidget *vbox;
GtkWidget *notebook;
GtkWidget *info_bar;
GtkWidget *statbar;
GtkWidget *window;
signal(SIGSEGV, perf_gtk__signal);
signal(SIGFPE, perf_gtk__signal);
signal(SIGINT, perf_gtk__signal);
signal(SIGQUIT, perf_gtk__signal);
signal(SIGTERM, perf_gtk__signal);
window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
gtk_window_set_title(GTK_WINDOW(window), "perf report");
g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
pgctx = perf_gtk__activate_context(window);
if (!pgctx)
return -1;
vbox = gtk_vbox_new(FALSE, 0);
notebook = gtk_notebook_new();
gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
info_bar = perf_gtk__setup_info_bar();
if (info_bar)
gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0);
statbar = perf_gtk__setup_statusbar();
gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);
gtk_container_add(GTK_CONTAINER(window), vbox);
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
const char *evname = evsel__name(pos);
GtkWidget *scrolled_window;
GtkWidget *tab_label;
char buf[512];
size_t size = sizeof(buf);
if (symbol_conf.event_group) {
if (!evsel__is_group_leader(pos))
continue;
if (pos->core.nr_members > 1) {
evsel__group_desc(pos, buf, size);
evname = buf;
}
}
scrolled_window = gtk_scrolled_window_new(NULL, NULL);
gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
GTK_POLICY_AUTOMATIC,
GTK_POLICY_AUTOMATIC);
if (symbol_conf.report_hierarchy)
perf_gtk__show_hierarchy(scrolled_window, hists, min_pcnt);
else
perf_gtk__show_hists(scrolled_window, hists, min_pcnt);
tab_label = gtk_label_new(evname);
gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
}
gtk_widget_show_all(window);
perf_gtk__resize_window(window);
gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
ui_helpline__push(help);
gtk_main();
perf_gtk__deactivate_context(&pgctx);
return 0;
}
| linux-master | tools/perf/ui/gtk/hists.c |
// SPDX-License-Identifier: GPL-2.0
/*
* An empty pmu-events.c file used when there is no architecture json files in
* arch or when the jevents.py script cannot be run.
*
* The test cpu/soc is provided for testing.
*/
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>
static const struct pmu_event pmu_events__test_soc_cpu[] = {
{
.name = "l3_cache_rd",
.event = "event=0x40",
.desc = "L3 cache access, read",
.topic = "cache",
.long_desc = "Attributable Level 3 cache access, read",
},
{
.name = "segment_reg_loads.any",
.event = "event=0x6,period=200000,umask=0x80",
.desc = "Number of segment register loads",
.topic = "other",
},
{
.name = "dispatch_blocked.any",
.event = "event=0x9,period=200000,umask=0x20",
.desc = "Memory cluster signals to block micro-op dispatch for any reason",
.topic = "other",
},
{
.name = "eist_trans",
.event = "event=0x3a,period=200000,umask=0x0",
.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
.topic = "other",
},
{
.name = "uncore_hisi_ddrc.flux_wcmd",
.event = "event=0x2",
.desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
.topic = "uncore",
.long_desc = "DDRC write commands",
.pmu = "hisi_sccl,ddrc",
},
{
.name = "unc_cbo_xsnp_response.miss_eviction",
.event = "event=0x22,umask=0x81",
.desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core. Unit: uncore_cbox ",
.topic = "uncore",
.long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.pmu = "uncore_cbox",
},
{
.name = "event-hyphen",
.event = "event=0xe0,umask=0x00",
.desc = "UNC_CBO_HYPHEN. Unit: uncore_cbox ",
.topic = "uncore",
.long_desc = "UNC_CBO_HYPHEN",
.pmu = "uncore_cbox",
},
{
.name = "event-two-hyph",
.event = "event=0xc0,umask=0x00",
.desc = "UNC_CBO_TWO_HYPH. Unit: uncore_cbox ",
.topic = "uncore",
.long_desc = "UNC_CBO_TWO_HYPH",
.pmu = "uncore_cbox",
},
{
.name = "uncore_hisi_l3c.rd_hit_cpipe",
.event = "event=0x7",
.desc = "Total read hits. Unit: hisi_sccl,l3c ",
.topic = "uncore",
.long_desc = "Total read hits",
.pmu = "hisi_sccl,l3c",
},
{
.name = "uncore_imc_free_running.cache_miss",
.event = "event=0x12",
.desc = "Total cache misses. Unit: uncore_imc_free_running ",
.topic = "uncore",
.long_desc = "Total cache misses",
.pmu = "uncore_imc_free_running",
},
{
.name = "uncore_imc.cache_hits",
.event = "event=0x34",
.desc = "Total cache hits. Unit: uncore_imc ",
.topic = "uncore",
.long_desc = "Total cache hits",
.pmu = "uncore_imc",
},
{
.name = "bp_l1_btb_correct",
.event = "event=0x8a",
.desc = "L1 BTB Correction",
.topic = "branch",
},
{
.name = "bp_l2_btb_correct",
.event = "event=0x8b",
.desc = "L2 BTB Correction",
.topic = "branch",
},
{
.name = 0,
.event = 0,
.desc = 0,
},
};
static const struct pmu_metric pmu_metrics__test_soc_cpu[] = {
{
.metric_expr = "1 / IPC",
.metric_name = "CPI",
},
{
.metric_expr = "inst_retired.any / cpu_clk_unhalted.thread",
.metric_name = "IPC",
.metric_group = "group1",
},
{
.metric_expr = "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * "
"( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
.metric_name = "Frontend_Bound_SMT",
},
{
.metric_expr = "l1d\\-loads\\-misses / inst_retired.any",
.metric_name = "dcache_miss_cpi",
},
{
.metric_expr = "l1i\\-loads\\-misses / inst_retired.any",
.metric_name = "icache_miss_cycles",
},
{
.metric_expr = "(dcache_miss_cpi + icache_miss_cycles)",
.metric_name = "cache_miss_cycles",
.metric_group = "group1",
},
{
.metric_expr = "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
.metric_name = "DCache_L2_All_Hits",
},
{
.metric_expr = "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + "
"l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
.metric_name = "DCache_L2_All_Miss",
},
{
.metric_expr = "DCache_L2_All_Hits + DCache_L2_All_Miss",
.metric_name = "DCache_L2_All",
},
{
.metric_expr = "d_ratio(DCache_L2_All_Hits, DCache_L2_All)",
.metric_name = "DCache_L2_Hits",
},
{
.metric_expr = "d_ratio(DCache_L2_All_Miss, DCache_L2_All)",
.metric_name = "DCache_L2_Misses",
},
{
.metric_expr = "ipc + M2",
.metric_name = "M1",
},
{
.metric_expr = "ipc + M1",
.metric_name = "M2",
},
{
.metric_expr = "1/M3",
.metric_name = "M3",
},
{
.metric_expr = "64 * l1d.replacement / 1000000000 / duration_time",
.metric_name = "L1D_Cache_Fill_BW",
},
{
.metric_expr = 0,
.metric_name = 0,
},
};
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
const struct pmu_event *entries;
};
/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
const struct pmu_metric *entries;
};
/*
* Map a CPU to its table of PMU events. The CPU is identified by the
* cpuid field, which is an arch-specific identifier for the CPU.
* The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c.
*
* The cpuid can contain any character other than the comma.
*/
struct pmu_events_map {
const char *arch;
const char *cpuid;
const struct pmu_events_table event_table;
const struct pmu_metrics_table metric_table;
};
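/*
 * An illustrative mapfile line (fields: cpuid, version, JSON directory,
 * event type); the exact entries depend on the architecture:
 *
 *   0x00000000420f5160,v1,cavium/thunderx2,core
 */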
/*
* Global table mapping each known CPU for the architecture to its
* table of PMU events.
*/
static const struct pmu_events_map pmu_events_map[] = {
{
.arch = "testarch",
.cpuid = "testcpu",
.event_table = { pmu_events__test_soc_cpu },
.metric_table = { pmu_metrics__test_soc_cpu },
},
{
.arch = 0,
.cpuid = 0,
.event_table = { 0 },
.metric_table = { 0 },
},
};
static const struct pmu_event pmu_events__test_soc_sys[] = {
{
.name = "sys_ddr_pmu.write_cycles",
.event = "event=0x2b",
.desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
.compat = "v8",
.topic = "uncore",
.pmu = "uncore_sys_ddr_pmu",
},
{
.name = "sys_ccn_pmu.read_cycles",
.event = "config=0x2c",
.desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
.compat = "0x01",
.topic = "uncore",
.pmu = "uncore_sys_ccn_pmu",
},
{
.name = 0,
.event = 0,
.desc = 0,
},
};
struct pmu_sys_events {
const char *name;
const struct pmu_events_table table;
};
static const struct pmu_sys_events pmu_sys_event_tables[] = {
{
.table = { pmu_events__test_soc_sys },
.name = "pmu_events__test_soc_sys",
},
{
.table = { 0 }
},
};
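/*
 * Walk the sentinel-terminated event table, skipping entries whose PMU
 * doesn't match; a non-zero return from @fn stops the walk and is
 * propagated to the caller.
 */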
int pmu_events_table__for_each_event(const struct pmu_events_table *table, struct perf_pmu *pmu,
pmu_event_iter_fn fn, void *data)
{
for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
int ret;
if (pmu && !pmu__name_match(pmu, pe->pmu))
continue;
ret = fn(pe, table, data);
if (ret)
return ret;
}
return 0;
}
int pmu_events_table__find_event(const struct pmu_events_table *table,
struct perf_pmu *pmu,
const char *name,
pmu_event_iter_fn fn,
void *data)
{
for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
if (pmu && !pmu__name_match(pmu, pe->pmu))
continue;
if (!strcasecmp(pe->name, name))
return fn(pe, table, data);
}
return -1000;
}
size_t pmu_events_table__num_events(const struct pmu_events_table *table,
struct perf_pmu *pmu)
{
size_t count = 0;
for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
if (pmu && !pmu__name_match(pmu, pe->pmu))
continue;
count++;
}
return count;
}
int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
void *data)
{
for (const struct pmu_metric *pm = &table->entries[0]; pm->metric_expr; pm++) {
int ret = fn(pm, table, data);
if (ret)
return ret;
}
return 0;
}
const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
const struct pmu_events_table *table = NULL;
char *cpuid = perf_pmu__getcpuid(pmu);
int i;
	/*
	 * On some platforms that use a CPU map, cpuid can be NULL for
	 * PMUs other than core PMUs.
	 */
if (!cpuid)
return NULL;
i = 0;
for (;;) {
const struct pmu_events_map *map = &pmu_events_map[i++];
if (!map->cpuid)
break;
if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
table = &map->event_table;
break;
}
}
free(cpuid);
return table;
}
const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
const struct pmu_metrics_table *table = NULL;
char *cpuid = perf_pmu__getcpuid(pmu);
int i;
	/*
	 * On some platforms that use a CPU map, cpuid can be NULL for
	 * PMUs other than core PMUs.
	 */
if (!cpuid)
return NULL;
i = 0;
for (;;) {
const struct pmu_events_map *map = &pmu_events_map[i++];
if (!map->cpuid)
break;
if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
table = &map->metric_table;
break;
}
}
free(cpuid);
return table;
}
const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
for (const struct pmu_events_map *tables = &pmu_events_map[0];
tables->arch;
tables++) {
if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
return &tables->event_table;
}
return NULL;
}
const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
for (const struct pmu_events_map *tables = &pmu_events_map[0];
tables->arch;
tables++) {
if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
return &tables->metric_table;
}
return NULL;
}
int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
for (const struct pmu_events_map *tables = &pmu_events_map[0]; tables->arch; tables++) {
int ret = pmu_events_table__for_each_event(&tables->event_table,
/*pmu=*/ NULL, fn, data);
if (ret)
return ret;
}
return 0;
}
int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
for (const struct pmu_events_map *tables = &pmu_events_map[0];
tables->arch;
tables++) {
int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
if (ret)
return ret;
}
return 0;
}
const struct pmu_events_table *find_sys_events_table(const char *name)
{
for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
tables->name;
tables++) {
if (!strcmp(tables->name, name))
return &tables->table;
}
return NULL;
}
int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
tables->name;
tables++) {
int ret = pmu_events_table__for_each_event(&tables->table, /*pmu=*/ NULL, fn, data);
if (ret)
return ret;
}
return 0;
}
int pmu_for_each_sys_metric(pmu_metric_iter_fn fn __maybe_unused, void *data __maybe_unused)
{
return 0;
}
const char *describe_metricgroup(const char *group __maybe_unused)
{
return NULL;
}
| linux-master | tools/perf/pmu-events/empty-pmu-events.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_EVENTFD_SUPPORT
/*
* Copyright (C) 2018 Davidlohr Bueso.
*
* This program benchmarks concurrent epoll_wait(2) monitoring multiple
* file descriptors under one or two load balancing models. The first,
* and default, is the single/combined queueing (which refers to a single
* epoll instance for N worker threads):
*
* |---> [worker A]
* |---> [worker B]
* [combined queue] .---> [worker C]
* |---> [worker D]
* |---> [worker E]
*
 * The second model, enabled via the --multiq option, uses multiple
 * queueing (which refers to one epoll instance per worker). For example,
* short lived tcp connections in a high throughput httpd server will
* distribute the accept()'ing connections across CPUs. In this case each
* worker does a limited amount of processing.
*
* [queue A] ---> [worker]
* [queue B] ---> [worker]
* [queue C] ---> [worker]
* [queue D] ---> [worker]
* [queue E] ---> [worker]
*
* Naturally, the single queue will enforce more concurrency on the epoll
* instance, and can therefore scale poorly compared to multiple queues.
 * However, this is raw benchmark data and must be taken with a grain of
 * salt when choosing how to make use of sys_epoll.
* Each thread has a number of private, nonblocking file descriptors,
* referred to as fdmap. A writer thread will constantly be writing to
* the fdmaps of all threads, minimizing each thread's chances of
* epoll_wait not finding any ready read events and blocking as this
* is not what we want to stress. The size of the fdmap can be adjusted
* by the user; enlarging the value will increase the chances of
* epoll_wait(2) blocking as the linear writer thread will take "longer",
* at least at a high level.
*
* Note that because fds are private to each thread, this workload does
* not stress scenarios where multiple tasks are awoken per ready IO; i.e.,
* EPOLLEXCLUSIVE semantics.
*
* The end result/metric is throughput: number of ops/second where an
* operation consists of:
*
* epoll_wait(2) + [others]
*
* ... where [others] is the cost of re-adding the fd (EPOLLET),
* or rearming it (EPOLLONESHOT).
*
*
* The purpose of this program is to be useful for measuring kernel-related
* changes to sys_epoll, and not for comparing different IO polling
* methods, for example. Hence everything is very ad hoc and
* outputs raw microbenchmark numbers. Also, this uses eventfd; similar
* tools tend to use pipes or sockets, but the result is the same.
*/
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <perf/cpumap.h>
#include "../util/stat.h"
#include "../util/mutex.h"
#include <subcmd/parse-options.h>
#include "bench.h"
#include <err.h>
#define printinfo(fmt, arg...) \
do { if (__verbose) { printf(fmt, ## arg); fflush(stdout); } } while (0)
static unsigned int nthreads = 0;
static unsigned int nsecs = 8;
static bool wdone, done, __verbose, randomize, nonblocking;
/*
* epoll related shared variables.
*/
/* Maximum number of nesting allowed inside epoll sets */
#define EPOLL_MAXNESTS 4
static int epollfd;
static int *epollfdp;
static bool noaffinity;
static unsigned int nested = 0;
static bool et; /* edge-trigger */
static bool oneshot;
static bool multiq; /* use an epoll instance per thread */
/* amount of fds to monitor, per thread */
static unsigned int nfds = 64;
static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static struct cond thread_parent, thread_worker;
struct worker {
int tid;
int epollfd; /* for --multiq */
pthread_t thread;
unsigned long ops;
int *fdmap;
};
static const struct option options[] = {
/* general benchmark options */
OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
OPT_UINTEGER('f', "nfds", &nfds, "Specify amount of file descriptors to monitor for each thread"),
OPT_BOOLEAN( 'n', "noaffinity", &noaffinity, "Disables CPU affinity"),
OPT_BOOLEAN('R', "randomize", &randomize, "Enable random write behaviour (default is linear)"),
OPT_BOOLEAN( 'v', "verbose", &__verbose, "Verbose mode"),
/* epoll specific options */
OPT_BOOLEAN( 'm', "multiq", &multiq, "Use multiple epoll instances (one per thread)"),
OPT_BOOLEAN( 'B', "nonblocking", &nonblocking, "Nonblocking epoll_wait(2) behaviour"),
OPT_UINTEGER( 'N', "nested", &nested, "Nesting level epoll hierarchy (default is 0, no nesting)"),
OPT_BOOLEAN( 'S', "oneshot", &oneshot, "Use EPOLLONESHOT semantics"),
OPT_BOOLEAN( 'E', "edge", &et, "Use Edge-triggered interface (default is LT)"),
OPT_END()
};
static const char * const bench_epoll_wait_usage[] = {
"perf bench epoll wait <options>",
NULL
};
/*
* Arrange the N elements of ARRAY in random order.
* Only effective if N is much smaller than RAND_MAX;
* if this may not be the case, use a better random
* number generator. -- Ben Pfaff.
*/
static void shuffle(void *array, size_t n, size_t size)
{
char *carray = array;
void *aux;
size_t i;
if (n <= 1)
return;
aux = calloc(1, size);
if (!aux)
err(EXIT_FAILURE, "calloc");
for (i = 1; i < n; ++i) {
size_t j = i + rand() / (RAND_MAX / (n - i) + 1);
j *= size;
memcpy(aux, &carray[j], size);
memcpy(&carray[j], &carray[i*size], size);
memcpy(&carray[i*size], aux, size);
}
free(aux);
}
static void *workerfn(void *arg)
{
int fd, ret, r;
struct worker *w = (struct worker *) arg;
unsigned long ops = w->ops;
struct epoll_event ev;
uint64_t val;
int to = nonblocking ? 0 : -1;
int efd = multiq ? w->epollfd : epollfd;
mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
cond_signal(&thread_parent);
cond_wait(&thread_worker, &thread_lock);
mutex_unlock(&thread_lock);
do {
/*
* Block indefinitely waiting for the EPOLLIN event.
* In order to stress the epoll_wait(2) syscall,
* call it one event at a time, instead of using a
* larger batch (maxevents) limit.
*/
do {
ret = epoll_wait(efd, &ev, 1, to);
} while (ret < 0 && errno == EINTR);
if (ret < 0)
err(EXIT_FAILURE, "epoll_wait");
fd = ev.data.fd;
do {
r = read(fd, &val, sizeof(val));
} while (!done && (r < 0 && errno == EAGAIN));
if (et) {
ev.events = EPOLLIN | EPOLLET;
ret = epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev);
}
if (oneshot) {
/* rearm the file descriptor with a new event mask */
ev.events |= EPOLLIN | EPOLLONESHOT;
ret = epoll_ctl(efd, EPOLL_CTL_MOD, fd, &ev);
}
ops++;
} while (!done);
if (multiq)
close(w->epollfd);
w->ops = ops;
return NULL;
}
static void nest_epollfd(struct worker *w)
{
unsigned int i;
struct epoll_event ev;
int efd = multiq ? w->epollfd : epollfd;
if (nested > EPOLL_MAXNESTS)
nested = EPOLL_MAXNESTS;
epollfdp = calloc(nested, sizeof(*epollfdp));
if (!epollfdp)
err(EXIT_FAILURE, "calloc");
for (i = 0; i < nested; i++) {
epollfdp[i] = epoll_create(1);
if (epollfdp[i] < 0)
err(EXIT_FAILURE, "epoll_create");
}
ev.events = EPOLLHUP; /* anything */
ev.data.u64 = i; /* any number */
for (i = nested - 1; i; i--) {
if (epoll_ctl(epollfdp[i - 1], EPOLL_CTL_ADD,
epollfdp[i], &ev) < 0)
err(EXIT_FAILURE, "epoll_ctl");
}
if (epoll_ctl(efd, EPOLL_CTL_ADD, *epollfdp, &ev) < 0)
err(EXIT_FAILURE, "epoll_ctl");
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
/* inform all threads that we're done for the day */
done = true;
gettimeofday(&bench__end, NULL);
timersub(&bench__end, &bench__start, &bench__runtime);
}
static void print_summary(void)
{
unsigned long avg = avg_stats(&throughput_stats);
double stddev = stddev_stats(&throughput_stats);
printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
avg, rel_stddev_stats(stddev, avg),
(int)bench__runtime.tv_sec);
}
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t *cpuset;
unsigned int i, j;
int ret = 0, events = EPOLLIN;
int nrcpus;
size_t size;
if (oneshot)
events |= EPOLLONESHOT;
if (et)
events |= EPOLLET;
printinfo("starting worker/consumer %sthreads%s\n",
noaffinity ? "":"CPU affinity ",
nonblocking ? " (nonblocking)":"");
if (!noaffinity)
pthread_attr_init(&thread_attr);
nrcpus = perf_cpu_map__nr(cpu);
cpuset = CPU_ALLOC(nrcpus);
BUG_ON(!cpuset);
size = CPU_ALLOC_SIZE(nrcpus);
for (i = 0; i < nthreads; i++) {
struct worker *w = &worker[i];
if (multiq) {
w->epollfd = epoll_create(1);
if (w->epollfd < 0)
err(EXIT_FAILURE, "epoll_create");
if (nested)
nest_epollfd(w);
}
w->tid = i;
w->fdmap = calloc(nfds, sizeof(int));
if (!w->fdmap)
return 1;
for (j = 0; j < nfds; j++) {
int efd = multiq ? w->epollfd : epollfd;
struct epoll_event ev;
w->fdmap[j] = eventfd(0, EFD_NONBLOCK);
if (w->fdmap[j] < 0)
err(EXIT_FAILURE, "eventfd");
ev.data.fd = w->fdmap[j];
ev.events = events;
ret = epoll_ctl(efd, EPOLL_CTL_ADD,
w->fdmap[j], &ev);
if (ret < 0)
err(EXIT_FAILURE, "epoll_ctl");
}
if (!noaffinity) {
CPU_ZERO_S(size, cpuset);
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
size, cpuset);
ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
if (ret) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
}
attrp = &thread_attr;
}
ret = pthread_create(&w->thread, attrp, workerfn,
(void *)(struct worker *) w);
if (ret) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
}
}
CPU_FREE(cpuset);
if (!noaffinity)
pthread_attr_destroy(&thread_attr);
return ret;
}
static void *writerfn(void *p)
{
struct worker *worker = p;
size_t i, j, iter;
const uint64_t val = 1;
ssize_t sz;
struct timespec ts = { .tv_sec = 0,
.tv_nsec = 500 };
printinfo("starting writer-thread: doing %s writes ...\n",
randomize ? "random" : "linear");
for (iter = 0; !wdone; iter++) {
if (randomize) {
shuffle((void *)worker, nthreads, sizeof(*worker));
}
for (i = 0; i < nthreads; i++) {
struct worker *w = &worker[i];
if (randomize) {
shuffle((void *)w->fdmap, nfds, sizeof(int));
}
for (j = 0; j < nfds; j++) {
do {
sz = write(w->fdmap[j], &val, sizeof(val));
} while (!wdone && (sz < 0 && errno == EAGAIN));
}
}
nanosleep(&ts, NULL);
}
printinfo("exiting writer-thread (total full-loops: %zd)\n", iter);
return NULL;
}
static int cmpworker(const void *p1, const void *p2)
{
struct worker *w1 = (struct worker *) p1;
struct worker *w2 = (struct worker *) p2;
/* qsort(3) expects a three-way result, not a boolean */
return w1->tid - w2->tid;
}
int bench_epoll_wait(int argc, const char **argv)
{
int ret = 0;
struct sigaction act;
unsigned int i;
struct worker *worker = NULL;
struct perf_cpu_map *cpu;
pthread_t wthread;
struct rlimit rl, prevrl;
argc = parse_options(argc, argv, options, bench_epoll_wait_usage, 0);
if (argc) {
usage_with_options(bench_epoll_wait_usage, options);
exit(EXIT_FAILURE);
}
memset(&act, 0, sizeof(act));
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
cpu = perf_cpu_map__new(NULL);
if (!cpu)
goto errmem;
/* a single, main epoll instance */
if (!multiq) {
epollfd = epoll_create(1);
if (epollfd < 0)
err(EXIT_FAILURE, "epoll_create");
/*
* Deal with nested epolls, if any.
*/
if (nested)
nest_epollfd(NULL);
}
printinfo("Using %s queue model\n", multiq ? "multi" : "single");
printinfo("Nesting level(s): %d\n", nested);
/* default to the number of CPUs and leave one for the writer pthread */
if (!nthreads)
nthreads = perf_cpu_map__nr(cpu) - 1;
worker = calloc(nthreads, sizeof(*worker));
if (!worker) {
goto errmem;
}
if (getrlimit(RLIMIT_NOFILE, &prevrl))
err(EXIT_FAILURE, "getrlimit");
rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
printinfo("Setting RLIMIT_NOFILE rlimit from %" PRIu64 " to: %" PRIu64 "\n",
(uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
err(EXIT_FAILURE, "setrlimit");
printf("Run summary [PID %d]: %d threads monitoring%s on "
"%d file-descriptors for %d secs.\n\n",
getpid(), nthreads, oneshot ? " (EPOLLONESHOT semantics)": "", nfds, nsecs);
init_stats(&throughput_stats);
mutex_init(&thread_lock);
cond_init(&thread_parent);
cond_init(&thread_worker);
threads_starting = nthreads;
gettimeofday(&bench__start, NULL);
do_threads(worker, cpu);
mutex_lock(&thread_lock);
while (threads_starting)
cond_wait(&thread_parent, &thread_lock);
cond_broadcast(&thread_worker);
mutex_unlock(&thread_lock);
/*
* At this point the workers should be blocked waiting for read events
* to become ready. Launch the writer which will constantly be writing
* to each thread's fdmap.
*/
ret = pthread_create(&wthread, NULL, writerfn,
(void *)(struct worker *) worker);
if (ret)
err(EXIT_FAILURE, "pthread_create");
sleep(nsecs);
toggle_done(0, NULL, NULL);
printinfo("main thread: toggling done\n");
sleep(1); /* meh */
wdone = true;
ret = pthread_join(wthread, NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
/* cleanup & report results */
cond_destroy(&thread_parent);
cond_destroy(&thread_worker);
mutex_destroy(&thread_lock);
/* sort the array back before reporting */
if (randomize)
qsort(worker, nthreads, sizeof(struct worker), cmpworker);
for (i = 0; i < nthreads; i++) {
unsigned long t = bench__runtime.tv_sec > 0 ?
worker[i].ops / bench__runtime.tv_sec : 0;
update_stats(&throughput_stats, t);
if (nfds == 1)
printf("[thread %2d] fdmap: %p [ %04ld ops/sec ]\n",
worker[i].tid, &worker[i].fdmap[0], t);
else
printf("[thread %2d] fdmap: %p ... %p [ %04ld ops/sec ]\n",
worker[i].tid, &worker[i].fdmap[0],
&worker[i].fdmap[nfds-1], t);
}
print_summary();
close(epollfd);
perf_cpu_map__put(cpu);
for (i = 0; i < nthreads; i++)
free(worker[i].fdmap);
free(worker);
return ret;
errmem:
err(EXIT_FAILURE, "calloc");
}
#endif // HAVE_EVENTFD_SUPPORT
| linux-master | tools/perf/bench/epoll-wait.c |
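The benchmark above registers per-thread eventfds in an epoll instance and measures epoll_wait(2) throughput. A minimal standalone sketch of the core mechanics it stresses (an eventfd registered edge-triggered, one write, one wait, one read), independent of the perf bench harness:
#include <err.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
int main(void)
{
	struct epoll_event ev, out;
	uint64_t val = 1;
	int efd, evtfd;
	efd = epoll_create1(0);
	if (efd < 0)
		err(EXIT_FAILURE, "epoll_create1");
	evtfd = eventfd(0, EFD_NONBLOCK);
	if (evtfd < 0)
		err(EXIT_FAILURE, "eventfd");
	ev.events = EPOLLIN | EPOLLET;	/* edge-triggered, as with --edge */
	ev.data.fd = evtfd;
	if (epoll_ctl(efd, EPOLL_CTL_ADD, evtfd, &ev) < 0)
		err(EXIT_FAILURE, "epoll_ctl");
	if (write(evtfd, &val, sizeof(val)) != sizeof(val))
		err(EXIT_FAILURE, "write");
	if (epoll_wait(efd, &out, 1, -1) != 1)
		err(EXIT_FAILURE, "epoll_wait");
	if (read(out.data.fd, &val, sizeof(val)) != sizeof(val))
		err(EXIT_FAILURE, "read");
	printf("drained eventfd counter: %" PRIu64 "\n", val);
	close(evtfd);
	close(efd);
	return 0;
}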
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* uprobe.c
*
* uprobe benchmarks
*
* Copyright (C) 2023, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "../perf.h"
#include "../util/util.h"
#include <subcmd/parse-options.h>
#include "../builtin.h"
#include "bench.h"
#include <linux/compiler.h>
#include <linux/time64.h>
#include <inttypes.h>
#include <stdio.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#define LOOPS_DEFAULT 1000
static int loops = LOOPS_DEFAULT;
enum bench_uprobe {
BENCH_UPROBE__BASELINE,
BENCH_UPROBE__EMPTY,
BENCH_UPROBE__TRACE_PRINTK,
};
static const struct option options[] = {
OPT_INTEGER('l', "loop", &loops, "Specify number of loops"),
OPT_END()
};
static const char * const bench_uprobe_usage[] = {
"perf bench uprobe <options>",
NULL
};
#ifdef HAVE_BPF_SKEL
#include "bpf_skel/bench_uprobe.skel.h"
#define bench_uprobe__attach_uprobe(prog) \
skel->links.prog = bpf_program__attach_uprobe_opts(/*prog=*/skel->progs.prog, \
/*pid=*/-1, \
/*binary_path=*/"/lib64/libc.so.6", \
/*func_offset=*/0, \
/*opts=*/&uprobe_opts); \
if (!skel->links.prog) { \
err = -errno; \
fprintf(stderr, "Failed to attach bench uprobe \"%s\": %s\n", #prog, strerror(errno)); \
goto cleanup; \
}
struct bench_uprobe_bpf *skel;
static int bench_uprobe__setup_bpf_skel(enum bench_uprobe bench)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
int err;
/* Load and verify BPF application */
skel = bench_uprobe_bpf__open();
if (!skel) {
fprintf(stderr, "Failed to open and load uprobes bench BPF skeleton\n");
return -1;
}
err = bench_uprobe_bpf__load(skel);
if (err) {
fprintf(stderr, "Failed to load and verify BPF skeleton\n");
goto cleanup;
}
uprobe_opts.func_name = "usleep";
switch (bench) {
case BENCH_UPROBE__BASELINE: break;
case BENCH_UPROBE__EMPTY: bench_uprobe__attach_uprobe(empty); break;
case BENCH_UPROBE__TRACE_PRINTK: bench_uprobe__attach_uprobe(trace_printk); break;
default:
fprintf(stderr, "Invalid bench: %d\n", bench);
goto cleanup;
}
return err;
cleanup:
bench_uprobe_bpf__destroy(skel);
return err;
}
static void bench_uprobe__teardown_bpf_skel(void)
{
if (skel) {
bench_uprobe_bpf__destroy(skel);
skel = NULL;
}
}
#else
static int bench_uprobe__setup_bpf_skel(enum bench_uprobe bench __maybe_unused) { return 0; }
static void bench_uprobe__teardown_bpf_skel(void) {}
#endif
static int bench_uprobe_format__default_fprintf(const char *name, const char *unit, u64 diff, FILE *fp)
{
static u64 baseline, previous;
s64 diff_to_baseline = diff - baseline,
diff_to_previous = diff - previous;
int printed = fprintf(fp, "# Executed %'d %s calls\n", loops, name);
printed += fprintf(fp, " %14s: %'" PRIu64 " %ss", "Total time", diff, unit);
if (baseline) {
printed += fprintf(fp, " %s%'" PRId64 " to baseline", diff_to_baseline > 0 ? "+" : "", diff_to_baseline);
if (previous != baseline)
printed += fprintf(fp, " %s%'" PRId64 " to previous", diff_to_previous > 0 ? "+" : "", diff_to_previous);
}
printed += fprintf(fp, "\n\n %'.3f %ss/op", (double)diff / (double)loops, unit);
if (baseline) {
printed += fprintf(fp, " %'.3f %ss/op to baseline", (double)diff_to_baseline / (double)loops, unit);
if (previous != baseline)
printed += fprintf(fp, " %'.3f %ss/op to previous", (double)diff_to_previous / (double)loops, unit);
} else {
baseline = diff;
}
fputc('\n', fp);
previous = diff;
return printed + 1;
}
static int bench_uprobe(int argc, const char **argv, enum bench_uprobe bench)
{
const char *name = "usleep(1000)", *unit = "usec";
struct timespec start, end;
u64 diff;
int i;
argc = parse_options(argc, argv, options, bench_uprobe_usage, 0);
if (bench != BENCH_UPROBE__BASELINE && bench_uprobe__setup_bpf_skel(bench) < 0)
return 0;
clock_gettime(CLOCK_REALTIME, &start);
for (i = 0; i < loops; i++) {
usleep(USEC_PER_MSEC);
}
clock_gettime(CLOCK_REALTIME, &end);
diff = end.tv_sec * NSEC_PER_SEC + end.tv_nsec - (start.tv_sec * NSEC_PER_SEC + start.tv_nsec);
diff /= NSEC_PER_USEC;
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
bench_uprobe_format__default_fprintf(name, unit, diff, stdout);
break;
case BENCH_FORMAT_SIMPLE:
printf("%" PRIu64 "\n", diff);
break;
default:
/* reaching here is something of a disaster */
fprintf(stderr, "Unknown format:%d\n", bench_format);
exit(1);
}
if (bench != BENCH_UPROBE__BASELINE)
bench_uprobe__teardown_bpf_skel();
return 0;
}
int bench_uprobe_baseline(int argc, const char **argv)
{
return bench_uprobe(argc, argv, BENCH_UPROBE__BASELINE);
}
int bench_uprobe_empty(int argc, const char **argv)
{
return bench_uprobe(argc, argv, BENCH_UPROBE__EMPTY);
}
int bench_uprobe_trace_printk(int argc, const char **argv)
{
return bench_uprobe(argc, argv, BENCH_UPROBE__TRACE_PRINTK);
}
| linux-master | tools/perf/bench/uprobe.c |
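bench_uprobe() above times a fixed loop of usleep() calls with clock_gettime() and reports usecs per operation. A minimal standalone sketch of that timing pattern; it deliberately substitutes CLOCK_MONOTONIC for the CLOCK_REALTIME used above so wall-clock adjustments cannot skew the measurement:
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_USEC	1000ULL
int main(void)
{
	struct timespec start, end;
	unsigned long long diff;
	int i, loops = 100;
	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < loops; i++)
		usleep(1000);	/* the benchmarked operation */
	clock_gettime(CLOCK_MONOTONIC, &end);
	/* unsigned modular arithmetic keeps this correct even when
	 * end.tv_nsec < start.tv_nsec */
	diff = (end.tv_sec - start.tv_sec) * NSEC_PER_SEC
		+ end.tv_nsec - start.tv_nsec;
	diff /= NSEC_PER_USEC;
	printf("%llu usecs total, %.3f usecs/op\n",
	       diff, (double)diff / loops);
	return 0;
}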
// SPDX-License-Identifier: GPL-2.0
#include <subcmd/parse-options.h>
#include "bench.h"
#include <uapi/linux/filter.h>
#include <sys/types.h>
#include <sys/time.h>
#include <linux/unistd.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <linux/time64.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>
#include <unistd.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/wait.h>
#include <string.h>
#include <errno.h>
#include <err.h>
#include <inttypes.h>
#define LOOPS_DEFAULT 1000000UL
static uint64_t loops = LOOPS_DEFAULT;
static bool sync_mode;
static const struct option options[] = {
OPT_U64('l', "loop", &loops, "Specify number of loops"),
OPT_BOOLEAN('s', "sync-mode", &sync_mode,
"Enable the synchronious mode for seccomp notifications"),
OPT_END()
};
static const char * const bench_seccomp_usage[] = {
"perf bench sched secccomp-notify <options>",
NULL
};
static int seccomp(unsigned int op, unsigned int flags, void *args)
{
return syscall(__NR_seccomp, op, flags, args);
}
static int user_notif_syscall(int nr, unsigned int flags)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, nr, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_USER_NOTIF),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog);
}
#define USER_NOTIF_MAGIC INT_MAX
static void user_notification_sync_loop(int listener)
{
struct seccomp_notif_resp resp;
struct seccomp_notif req;
uint64_t nr;
for (nr = 0; nr < loops; nr++) {
memset(&req, 0, sizeof(req));
if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req))
err(EXIT_FAILURE, "SECCOMP_IOCTL_NOTIF_RECV failed");
if (req.data.nr != __NR_gettid)
errx(EXIT_FAILURE, "unexpected syscall: %d", req.data.nr);
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
resp.flags = 0;
if (ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp))
err(EXIT_FAILURE, "SECCOMP_IOCTL_NOTIF_SEND failed");
}
}
#ifndef SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP
#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0)
#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64)
#endif
int bench_sched_seccomp_notify(int argc, const char **argv)
{
struct timeval start, stop, diff;
unsigned long long result_usec = 0;
int status, listener;
pid_t pid;
long ret;
argc = parse_options(argc, argv, options, bench_seccomp_usage, 0);
gettimeofday(&start, NULL);
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
listener = user_notif_syscall(__NR_gettid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
if (listener < 0)
err(EXIT_FAILURE, "can't create a notification descriptor");
pid = fork();
if (pid < 0)
err(EXIT_FAILURE, "fork");
if (pid == 0) {
if (prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0))
err(EXIT_FAILURE, "can't set the parent death signal");
while (1) {
ret = syscall(__NR_gettid);
if (ret == USER_NOTIF_MAGIC)
continue;
break;
}
_exit(1);
}
if (sync_mode) {
if (ioctl(listener, SECCOMP_IOCTL_NOTIF_SET_FLAGS,
SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP, 0))
err(EXIT_FAILURE,
"can't set SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP");
}
user_notification_sync_loop(listener);
kill(pid, SIGKILL);
if (waitpid(pid, &status, 0) != pid)
err(EXIT_FAILURE, "waitpid(%d) failed", pid);
if (!WIFSIGNALED(status) || WTERMSIG(status) != SIGKILL)
errx(EXIT_FAILURE, "unexpected exit code: %d", status);
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
printf("# Executed %" PRIu64 " system calls\n\n",
loops);
result_usec = diff.tv_sec * USEC_PER_SEC;
result_usec += diff.tv_usec;
printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
(unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / USEC_PER_MSEC));
printf(" %14lf usecs/op\n",
(double)result_usec / (double)loops);
printf(" %14d ops/sec\n",
(int)((double)loops /
((double)result_usec / (double)USEC_PER_SEC)));
break;
case BENCH_FORMAT_SIMPLE:
printf("%lu.%03lu\n",
(unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / USEC_PER_MSEC));
break;
default:
/* reaching here is something of a disaster */
fprintf(stderr, "Unknown format:%d\n", bench_format);
exit(1);
break;
}
return 0;
}
| linux-master | tools/perf/bench/sched-seccomp-notify.c |
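The benchmark above builds its classic-BPF filter with BPF_STMT()/BPF_JUMP() and installs it through the raw seccomp(2) syscall. A minimal standalone sketch of that installation sequence, reduced to a filter that unconditionally returns SECCOMP_RET_ALLOW:
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
int main(void)
{
	struct sock_filter filter[] = {
		/* allow every syscall unconditionally */
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)(sizeof(filter) / sizeof(filter[0])),
		.filter = filter,
	};
	/* required before an unprivileged task may install a filter */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		err(EXIT_FAILURE, "prctl(PR_SET_NO_NEW_PRIVS)");
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog))
		err(EXIT_FAILURE, "seccomp(SECCOMP_SET_MODE_FILTER)");
	puts("filter installed");
	return 0;
}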
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <stddef.h>
#include <ftw.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <internal/lib.h>
#include <subcmd/parse-options.h>
#include "bench.h"
#include "util/data.h"
#include "util/stat.h"
#include "util/debug.h"
#include "util/symbol.h"
#include "util/session.h"
#include "util/build-id.h"
#include "util/sample.h"
#include "util/synthetic-events.h"
#define MMAP_DEV_MAJOR 8
#define DSO_MMAP_RATIO 4
static unsigned int iterations = 100;
static unsigned int nr_mmaps = 100;
static unsigned int nr_samples = 100; /* samples per mmap */
static u64 bench_sample_type;
static u16 bench_id_hdr_size;
struct bench_data {
int pid;
int input_pipe[2];
int output_pipe[2];
pthread_t th;
};
struct bench_dso {
struct list_head list;
char *name;
int ino;
};
static int nr_dsos;
static struct bench_dso *dsos;
extern int cmd_inject(int argc, const char *argv[]);
static const struct option options[] = {
OPT_UINTEGER('i', "iterations", &iterations,
"Number of iterations used to compute average (default: 100)"),
OPT_UINTEGER('m', "nr-mmaps", &nr_mmaps,
"Number of mmap events for each iteration (default: 100)"),
OPT_UINTEGER('n', "nr-samples", &nr_samples,
"Number of sample events per mmap event (default: 100)"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show iteration count, DSO name, etc)"),
OPT_END()
};
static const char *const bench_usage[] = {
"perf bench internals inject-build-id <options>",
NULL
};
/*
* Helper for collect_dso that adds the given file as a dso to the dsos
* array if it contains a build-id. Stops after collecting 4 times more
* than we need (for MMAP2 events).
*/
static int add_dso(const char *fpath, const struct stat *sb __maybe_unused,
int typeflag, struct FTW *ftwbuf __maybe_unused)
{
struct bench_dso *dso = &dsos[nr_dsos];
struct build_id bid;
if (typeflag == FTW_D || typeflag == FTW_SL)
return 0;
if (filename__read_build_id(fpath, &bid) < 0)
return 0;
dso->name = realpath(fpath, NULL);
if (dso->name == NULL)
return -1;
dso->ino = nr_dsos++;
pr_debug2(" Adding DSO: %s\n", fpath);
/* stop if we collected enough DSOs */
if ((unsigned int)nr_dsos == DSO_MMAP_RATIO * nr_mmaps)
return 1;
return 0;
}
static void collect_dso(void)
{
dsos = calloc(nr_mmaps * DSO_MMAP_RATIO, sizeof(*dsos));
if (dsos == NULL) {
printf(" Memory allocation failed\n");
exit(1);
}
if (nftw("/usr/lib/", add_dso, 10, FTW_PHYS) < 0)
return;
pr_debug(" Collected %d DSOs\n", nr_dsos);
}
static void release_dso(void)
{
int i;
for (i = 0; i < nr_dsos; i++) {
struct bench_dso *dso = &dsos[i];
zfree(&dso->name);
}
free(dsos);
}
/* Fake address used by mmap and sample events */
static u64 dso_map_addr(struct bench_dso *dso)
{
return 0x400000ULL + dso->ino * 8192ULL;
}
static ssize_t synthesize_attr(struct bench_data *data)
{
union perf_event event;
memset(&event, 0, sizeof(event.attr) + sizeof(u64));
event.header.type = PERF_RECORD_HEADER_ATTR;
event.header.size = sizeof(event.attr) + sizeof(u64);
event.attr.attr.type = PERF_TYPE_SOFTWARE;
event.attr.attr.config = PERF_COUNT_SW_TASK_CLOCK;
event.attr.attr.exclude_kernel = 1;
event.attr.attr.sample_id_all = 1;
event.attr.attr.sample_type = bench_sample_type;
return writen(data->input_pipe[1], &event, event.header.size);
}
static ssize_t synthesize_fork(struct bench_data *data)
{
union perf_event event;
memset(&event, 0, sizeof(event.fork) + bench_id_hdr_size);
event.header.type = PERF_RECORD_FORK;
event.header.misc = PERF_RECORD_MISC_FORK_EXEC;
event.header.size = sizeof(event.fork) + bench_id_hdr_size;
event.fork.ppid = 1;
event.fork.ptid = 1;
event.fork.pid = data->pid;
event.fork.tid = data->pid;
return writen(data->input_pipe[1], &event, event.header.size);
}
static ssize_t synthesize_mmap(struct bench_data *data, struct bench_dso *dso, u64 timestamp)
{
union perf_event event;
size_t len = offsetof(struct perf_record_mmap2, filename);
u64 *id_hdr_ptr = (void *)&event;
int ts_idx;
len += roundup(strlen(dso->name) + 1, 8) + bench_id_hdr_size;
memset(&event, 0, min(len, sizeof(event.mmap2)));
event.header.type = PERF_RECORD_MMAP2;
event.header.misc = PERF_RECORD_MISC_USER;
event.header.size = len;
event.mmap2.pid = data->pid;
event.mmap2.tid = data->pid;
event.mmap2.maj = MMAP_DEV_MAJOR;
event.mmap2.ino = dso->ino;
strcpy(event.mmap2.filename, dso->name);
event.mmap2.start = dso_map_addr(dso);
event.mmap2.len = 4096;
event.mmap2.prot = PROT_EXEC;
if (len > sizeof(event.mmap2)) {
/* write mmap2 event first */
if (writen(data->input_pipe[1], &event, len - bench_id_hdr_size) < 0)
return -1;
/* zero-fill sample id header */
memset(id_hdr_ptr, 0, bench_id_hdr_size);
/* put timestamp in the right position */
ts_idx = (bench_id_hdr_size / sizeof(u64)) - 2;
id_hdr_ptr[ts_idx] = timestamp;
if (writen(data->input_pipe[1], id_hdr_ptr, bench_id_hdr_size) < 0)
return -1;
return len;
}
ts_idx = (len / sizeof(u64)) - 2;
id_hdr_ptr[ts_idx] = timestamp;
return writen(data->input_pipe[1], &event, len);
}
static ssize_t synthesize_sample(struct bench_data *data, struct bench_dso *dso, u64 timestamp)
{
union perf_event event;
struct perf_sample sample = {
.tid = data->pid,
.pid = data->pid,
.ip = dso_map_addr(dso),
.time = timestamp,
};
event.header.type = PERF_RECORD_SAMPLE;
event.header.misc = PERF_RECORD_MISC_USER;
event.header.size = perf_event__sample_event_size(&sample, bench_sample_type, 0);
perf_event__synthesize_sample(&event, bench_sample_type, 0, &sample);
return writen(data->input_pipe[1], &event, event.header.size);
}
static ssize_t synthesize_flush(struct bench_data *data)
{
struct perf_event_header header = {
.size = sizeof(header),
.type = PERF_RECORD_FINISHED_ROUND,
};
return writen(data->input_pipe[1], &header, header.size);
}
static void *data_reader(void *arg)
{
struct bench_data *data = arg;
char buf[8192];
int flag;
int n;
flag = fcntl(data->output_pipe[0], F_GETFL);
fcntl(data->output_pipe[0], F_SETFL, flag | O_NONBLOCK);
/* read out data from child */
while (true) {
n = read(data->output_pipe[0], buf, sizeof(buf));
if (n > 0)
continue;
if (n == 0)
break;
if (errno != EINTR && errno != EAGAIN)
break;
usleep(100);
}
close(data->output_pipe[0]);
return NULL;
}
static int setup_injection(struct bench_data *data, bool build_id_all)
{
int ready_pipe[2];
int dev_null_fd;
char buf;
if (pipe(ready_pipe) < 0)
return -1;
if (pipe(data->input_pipe) < 0)
return -1;
if (pipe(data->output_pipe) < 0)
return -1;
data->pid = fork();
if (data->pid < 0)
return -1;
if (data->pid == 0) {
const char **inject_argv;
int inject_argc = 2;
close(data->input_pipe[1]);
close(data->output_pipe[0]);
close(ready_pipe[0]);
dup2(data->input_pipe[0], STDIN_FILENO);
close(data->input_pipe[0]);
dup2(data->output_pipe[1], STDOUT_FILENO);
close(data->output_pipe[1]);
dev_null_fd = open("/dev/null", O_WRONLY);
if (dev_null_fd < 0)
exit(1);
dup2(dev_null_fd, STDERR_FILENO);
if (build_id_all)
inject_argc++;
inject_argv = calloc(inject_argc + 1, sizeof(*inject_argv));
if (inject_argv == NULL)
exit(1);
inject_argv[0] = strdup("inject");
inject_argv[1] = strdup("-b");
if (build_id_all)
inject_argv[2] = strdup("--buildid-all");
/* signal that we're ready to go */
close(ready_pipe[1]);
cmd_inject(inject_argc, inject_argv);
exit(0);
}
pthread_create(&data->th, NULL, data_reader, data);
close(ready_pipe[1]);
close(data->input_pipe[0]);
close(data->output_pipe[1]);
/* wait for child ready */
if (read(ready_pipe[0], &buf, 1) < 0)
return -1;
close(ready_pipe[0]);
return 0;
}
static int inject_build_id(struct bench_data *data, u64 *max_rss)
{
int status;
unsigned int i, k;
struct rusage rusage;
/* this makes the child start running */
if (perf_header__write_pipe(data->input_pipe[1]) < 0)
return -1;
if (synthesize_attr(data) < 0)
return -1;
if (synthesize_fork(data) < 0)
return -1;
for (i = 0; i < nr_mmaps; i++) {
int idx = rand() % (nr_dsos - 1);
struct bench_dso *dso = &dsos[idx];
u64 timestamp = rand() % 1000000;
pr_debug2(" [%d] injecting: %s\n", i+1, dso->name);
if (synthesize_mmap(data, dso, timestamp) < 0)
return -1;
for (k = 0; k < nr_samples; k++) {
if (synthesize_sample(data, dso, timestamp + k * 1000) < 0)
return -1;
}
if ((i + 1) % 10 == 0) {
if (synthesize_flush(data) < 0)
return -1;
}
}
/* this makes the child finish */
close(data->input_pipe[1]);
wait4(data->pid, &status, 0, &rusage);
*max_rss = rusage.ru_maxrss;
pr_debug(" Child %d exited with %d\n", data->pid, status);
return 0;
}
static void do_inject_loop(struct bench_data *data, bool build_id_all)
{
unsigned int i;
struct stats time_stats, mem_stats;
double time_average, time_stddev;
double mem_average, mem_stddev;
init_stats(&time_stats);
init_stats(&mem_stats);
pr_debug(" Build-id%s injection benchmark\n", build_id_all ? "-all" : "");
for (i = 0; i < iterations; i++) {
struct timeval start, end, diff;
u64 runtime_us, max_rss;
pr_debug(" Iteration #%d\n", i+1);
if (setup_injection(data, build_id_all) < 0) {
printf(" Build-id injection setup failed\n");
break;
}
gettimeofday(&start, NULL);
if (inject_build_id(data, &max_rss) < 0) {
printf(" Build-id injection failed\n");
break;
}
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
update_stats(&time_stats, runtime_us);
update_stats(&mem_stats, max_rss);
pthread_join(data->th, NULL);
}
time_average = avg_stats(&time_stats) / USEC_PER_MSEC;
time_stddev = stddev_stats(&time_stats) / USEC_PER_MSEC;
printf(" Average build-id%s injection took: %.3f msec (+- %.3f msec)\n",
build_id_all ? "-all" : "", time_average, time_stddev);
/* each iteration, it processes MMAP2 + BUILD_ID + nr_samples * SAMPLE */
time_average = avg_stats(&time_stats) / (nr_mmaps * (nr_samples + 2));
time_stddev = stddev_stats(&time_stats) / (nr_mmaps * (nr_samples + 2));
printf(" Average time per event: %.3f usec (+- %.3f usec)\n",
time_average, time_stddev);
mem_average = avg_stats(&mem_stats);
mem_stddev = stddev_stats(&mem_stats);
printf(" Average memory usage: %.0f KB (+- %.0f KB)\n",
mem_average, mem_stddev);
}
static int do_inject_loops(struct bench_data *data)
{
srand(time(NULL));
symbol__init(NULL);
bench_sample_type = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP;
bench_sample_type |= PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
bench_id_hdr_size = 32;
collect_dso();
if (nr_dsos == 0) {
printf(" Cannot collect DSOs for injection\n");
return -1;
}
do_inject_loop(data, false);
do_inject_loop(data, true);
release_dso();
return 0;
}
int bench_inject_build_id(int argc, const char **argv)
{
struct bench_data data;
argc = parse_options(argc, argv, options, bench_usage, 0);
if (argc) {
usage_with_options(bench_usage, options);
exit(EXIT_FAILURE);
}
return do_inject_loops(&data);
}
| linux-master | tools/perf/bench/inject-buildid.c |
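The average/stddev reporting in do_inject_loop() relies on perf's internal util/stat.c helpers. A minimal standalone equivalent using Welford's online algorithm is sketched below; note it reports the sample standard deviation, whereas perf's stddev_stats() reports the standard deviation of the mean, so the numbers are not byte-for-byte comparable:
#include <math.h>
#include <stdio.h>
struct run_stats {
	double n, mean, m2;
};
static void update(struct run_stats *s, double x)
{
	/* Welford's single-pass update of mean and squared deviations */
	double delta = x - s->mean;
	s->n += 1.0;
	s->mean += delta / s->n;
	s->m2 += delta * (x - s->mean);
}
static double stddev(const struct run_stats *s)
{
	return s->n > 1.0 ? sqrt(s->m2 / (s->n - 1.0)) : 0.0;
}
int main(void)	/* link with -lm */
{
	struct run_stats s = { 0 };
	double samples[] = { 10.0, 12.0, 9.0, 11.0 };
	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		update(&s, samples[i]);
	printf("avg %.3f (+- %.3f)\n", s.mean, stddev(&s));
	return 0;
}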