// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Davidlohr Bueso <[email protected]>
*
* futex-wake: Block a bunch of threads on a futex and wake'em up, N at a time.
*
* This program is particularly useful for measuring the latency of nthread
* wakeups in non-error situations: all waiters are queued and all wake calls
* wake up one or more tasks, so the waitqueue is never empty.
*/
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <signal.h>
#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/mman.h>
/* all threads will block on the same futex */
static u_int32_t futex1 = 0;
static pthread_t *worker;
static bool done = false;
static struct mutex thread_lock;
static struct cond thread_parent, thread_worker;
static struct stats waketime_stats, wakeup_stats;
static unsigned int threads_starting;
static int futex_flag = 0;
static struct bench_futex_parameters params = {
/*
* How many wakeups to do at a time.
* Default to 1 in order to make the kernel work more.
*/
.nwakes = 1,
};
static const struct option options[] = {
OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"),
OPT_UINTEGER('w', "nwakes", ¶ms.nwakes, "Specify amount of threads to wake at once"),
OPT_BOOLEAN( 's', "silent", ¶ms.silent, "Silent mode: do not display data/details"),
OPT_BOOLEAN( 'S', "shared", ¶ms.fshared, "Use shared futexes instead of private ones"),
OPT_BOOLEAN( 'm', "mlockall", ¶ms.mlockall, "Lock all current and future memory"),
OPT_END()
};
static const char * const bench_futex_wake_usage[] = {
"perf bench futex wake <options>",
NULL
};
static void *workerfn(void *arg __maybe_unused)
{
mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
cond_signal(&thread_parent);
cond_wait(&thread_worker, &thread_lock);
mutex_unlock(&thread_lock);
while (1) {
if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
break;
}
pthread_exit(NULL);
return NULL;
}
static void print_summary(void)
{
double waketime_avg = avg_stats(&waketime_stats);
double waketime_stddev = stddev_stats(&waketime_stats);
unsigned int wakeup_avg = avg_stats(&wakeup_stats);
printf("Wokeup %d of %d threads in %.4f ms (+-%.2f%%)\n",
wakeup_avg,
params.nthreads,
waketime_avg / USEC_PER_MSEC,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
{
cpu_set_t *cpuset;
unsigned int i;
size_t size;
int nrcpus = perf_cpu_map__nr(cpu);
threads_starting = params.nthreads;
cpuset = CPU_ALLOC(nrcpus);
BUG_ON(!cpuset);
size = CPU_ALLOC_SIZE(nrcpus);
/* create and block all threads */
for (i = 0; i < params.nthreads; i++) {
pthread_attr_t thread_attr;
pthread_attr_init(&thread_attr);
CPU_ZERO_S(size, cpuset);
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
}
if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
}
pthread_attr_destroy(&thread_attr);
}
CPU_FREE(cpuset);
}
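/*
 * A condensed, hypothetical sketch of the affinity pattern used in
 * block_threads() above: allocate a dynamically sized CPU set, pin to a
 * single CPU, then release the set. pin_self_to_cpu() is an illustrative
 * name and is not part of the benchmark (hence the #if 0); it pins the
 * calling thread instead of a to-be-created one.
 */
#if 0
static void pin_self_to_cpu(int cpu, int nrcpus)
{
	cpu_set_t *set = CPU_ALLOC(nrcpus);
	size_t size = CPU_ALLOC_SIZE(nrcpus);

	BUG_ON(!set);
	CPU_ZERO_S(size, set);
	CPU_SET_S(cpu, size, set);
	/* pin the calling thread rather than passing an attr to pthread_create() */
	if (pthread_setaffinity_np(pthread_self(), size, set))
		err(EXIT_FAILURE, "pthread_setaffinity_np");
	CPU_FREE(set);
}
#endif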
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
done = true;
}
int bench_futex_wake(int argc, const char **argv)
{
int ret = 0;
unsigned int i, j;
struct sigaction act;
struct perf_cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_wake_usage, 0);
if (argc) {
usage_with_options(bench_futex_wake_usage, options);
exit(EXIT_FAILURE);
}
cpu = perf_cpu_map__new(NULL);
if (!cpu)
err(EXIT_FAILURE, "calloc");
memset(&act, 0, sizeof(act));
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (params.mlockall) {
if (mlockall(MCL_CURRENT | MCL_FUTURE))
err(EXIT_FAILURE, "mlockall");
}
if (!params.nthreads)
params.nthreads = perf_cpu_map__nr(cpu);
worker = calloc(params.nthreads, sizeof(*worker));
if (!worker)
err(EXIT_FAILURE, "calloc");
if (!params.fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
printf("Run summary [PID %d]: blocking on %d threads (at [%s] futex %p), "
"waking up %d at a time.\n\n",
getpid(), params.nthreads, params.fshared ? "shared":"private",
&futex1, params.nwakes);
init_stats(&wakeup_stats);
init_stats(&waketime_stats);
mutex_init(&thread_lock);
cond_init(&thread_parent);
cond_init(&thread_worker);
for (j = 0; j < bench_repeat && !done; j++) {
unsigned int nwoken = 0;
struct timeval start, end, runtime;
/* create, launch & block all threads */
block_threads(worker, cpu);
/* make sure all threads are already blocked */
mutex_lock(&thread_lock);
while (threads_starting)
cond_wait(&thread_parent, &thread_lock);
cond_broadcast(&thread_worker);
mutex_unlock(&thread_lock);
usleep(100000);
/* Ok, all threads are patiently blocked, start waking folks up */
gettimeofday(&start, NULL);
while (nwoken != params.nthreads)
nwoken += futex_wake(&futex1,
params.nwakes, futex_flag);
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
update_stats(&wakeup_stats, nwoken);
update_stats(&waketime_stats, runtime.tv_usec);
if (!params.silent) {
printf("[Run %d]: Wokeup %d of %d threads in %.4f ms\n",
j + 1, nwoken, params.nthreads,
runtime.tv_usec / (double)USEC_PER_MSEC);
}
for (i = 0; i < params.nthreads; i++) {
ret = pthread_join(worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
}
/* cleanup & report results */
cond_destroy(&thread_parent);
cond_destroy(&thread_worker);
mutex_destroy(&thread_lock);
print_summary();
free(worker);
perf_cpu_map__put(cpu);
return ret;
}
/* linux-master | tools/perf/bench/futex-wake.c */
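/*
 * A minimal standalone sketch (not part of the benchmark) of the wait/wake
 * protocol futex-wake.c exercises, using the raw futex(2) syscall instead of
 * perf's futex_wait()/futex_wake() helpers. The sleep() is a crude
 * rendezvous; the benchmark uses a mutex/condvar pair instead.
 */
#include <err.h>
#include <linux/futex.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint32_t ftx; /* the word all waiters block on */

static void *waiter(void *arg)
{
	(void)arg;
	/* Sleep as long as ftx == 0 (the expected value). */
	syscall(SYS_futex, &ftx, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0);
	return NULL;
}

int main(void)
{
	pthread_t t;
	long n;

	if (pthread_create(&t, NULL, waiter, NULL))
		err(1, "pthread_create");
	sleep(1); /* crude: let the waiter reach FUTEX_WAIT */
	/* Wake at most one thread blocked on &ftx. */
	n = syscall(SYS_futex, &ftx, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
	printf("woke %ld waiter(s)\n", n);
	pthread_join(t, NULL);
	return 0;
}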
// SPDX-License-Identifier: GPL-2.0
/*
* mem-memcpy.c
*
* Simple memcpy() and memset() benchmarks
*
* Written by Hitoshi Mitake <[email protected]>
*/
#include "debug.h"
#include "../perf-sys.h"
#include <subcmd/parse-options.h>
#include "../util/header.h"
#include "../util/cloexec.h"
#include "../util/string2.h"
#include "bench.h"
#include "mem-memcpy-arch.h"
#include "mem-memset-arch.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <errno.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#define K 1024
static const char *size_str = "1MB";
static const char *function_str = "all";
static int nr_loops = 1;
static bool use_cycles;
static int cycles_fd;
static const struct option options[] = {
OPT_STRING('s', "size", &size_str, "1MB",
"Specify the size of the memory buffers. "
"Available units: B, KB, MB, GB and TB (case insensitive)"),
OPT_STRING('f', "function", &function_str, "all",
"Specify the function to run, \"all\" runs all available functions, \"help\" lists them"),
OPT_INTEGER('l', "nr_loops", &nr_loops,
"Specify the number of loops to run. (default: 1)"),
OPT_BOOLEAN('c', "cycles", &use_cycles,
"Use a cycles event instead of gettimeofday() to measure performance"),
OPT_END()
};
typedef void *(*memcpy_t)(void *, const void *, size_t);
typedef void *(*memset_t)(void *, int, size_t);
struct function {
const char *name;
const char *desc;
union {
memcpy_t memcpy;
memset_t memset;
} fn;
};
static struct perf_event_attr cycle_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES
};
static int init_cycles(void)
{
cycles_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, perf_event_open_cloexec_flag());
if (cycles_fd < 0 && errno == ENOSYS) {
pr_debug("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
return -1;
}
return cycles_fd;
}
static u64 get_cycles(void)
{
int ret;
u64 clk;
ret = read(cycles_fd, &clk, sizeof(u64));
BUG_ON(ret != sizeof(u64));
return clk;
}
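/*
 * A sketch of the counter setup above using the raw perf_event_open(2)
 * syscall, for which glibc provides no wrapper; the real code goes through
 * perf's sys_perf_event_open() helper and a close-on-exec flag probe.
 * Illustrative only, hence the #if 0.
 */
#if 0
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_cycles_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	/* count for this process, on any CPU, no group, no flags */
	return syscall(SYS_perf_event_open, &attr, getpid(), -1, -1, 0);
}
#endif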
static double timeval2double(struct timeval *ts)
{
return (double)ts->tv_sec + (double)ts->tv_usec / (double)USEC_PER_SEC;
}
#define print_bps(x) do { \
if (x < K) \
printf(" %14lf bytes/sec\n", x); \
else if (x < K * K) \
printf(" %14lfd KB/sec\n", x / K); \
else if (x < K * K * K) \
printf(" %14lf MB/sec\n", x / K / K); \
else \
printf(" %14lf GB/sec\n", x / K / K / K); \
} while (0)
struct bench_mem_info {
const struct function *functions;
u64 (*do_cycles)(const struct function *r, size_t size, void *src, void *dst);
double (*do_gettimeofday)(const struct function *r, size_t size, void *src, void *dst);
const char *const *usage;
bool alloc_src;
};
static void __bench_mem_function(struct bench_mem_info *info, int r_idx, size_t size, double size_total)
{
const struct function *r = &info->functions[r_idx];
double result_bps = 0.0;
u64 result_cycles = 0;
void *src = NULL, *dst = zalloc(size);
printf("# function '%s' (%s)\n", r->name, r->desc);
if (dst == NULL)
goto out_alloc_failed;
if (info->alloc_src) {
src = zalloc(size);
if (src == NULL)
goto out_alloc_failed;
}
if (bench_format == BENCH_FORMAT_DEFAULT)
printf("# Copying %s bytes ...\n\n", size_str);
if (use_cycles) {
result_cycles = info->do_cycles(r, size, src, dst);
} else {
result_bps = info->do_gettimeofday(r, size, src, dst);
}
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
if (use_cycles) {
printf(" %14lf cycles/byte\n", (double)result_cycles/size_total);
} else {
print_bps(result_bps);
}
break;
case BENCH_FORMAT_SIMPLE:
if (use_cycles) {
printf("%lf\n", (double)result_cycles/size_total);
} else {
printf("%lf\n", result_bps);
}
break;
default:
BUG_ON(1);
break;
}
out_free:
free(src);
free(dst);
return;
out_alloc_failed:
printf("# Memory allocation failed - maybe size (%s) is too large?\n", size_str);
goto out_free;
}
static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *info)
{
int i;
size_t size;
double size_total;
argc = parse_options(argc, argv, options, info->usage, 0);
if (use_cycles) {
i = init_cycles();
if (i < 0) {
fprintf(stderr, "Failed to open cycles counter\n");
return i;
}
}
size = (size_t)perf_atoll((char *)size_str);
size_total = (double)size * nr_loops;
if ((s64)size <= 0) {
fprintf(stderr, "Invalid size:%s\n", size_str);
return 1;
}
if (!strncmp(function_str, "all", 3)) {
for (i = 0; info->functions[i].name; i++)
__bench_mem_function(info, i, size, size_total);
return 0;
}
for (i = 0; info->functions[i].name; i++) {
if (!strcmp(info->functions[i].name, function_str))
break;
}
if (!info->functions[i].name) {
if (strcmp(function_str, "help") && strcmp(function_str, "h"))
printf("Unknown function: %s\n", function_str);
printf("Available functions:\n");
for (i = 0; info->functions[i].name; i++) {
printf("\t%s ... %s\n",
info->functions[i].name, info->functions[i].desc);
}
return 1;
}
__bench_mem_function(info, i, size, size_total);
return 0;
}
static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst)
{
/* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
memset(src, 0, size);
/*
* We prefault the freshly allocated memory range here,
* to not measure page fault overhead:
*/
fn(dst, src, size);
}
static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
{
u64 cycle_start = 0ULL, cycle_end = 0ULL;
memcpy_t fn = r->fn.memcpy;
int i;
memcpy_prefault(fn, size, src, dst);
cycle_start = get_cycles();
for (i = 0; i < nr_loops; ++i)
fn(dst, src, size);
cycle_end = get_cycles();
return cycle_end - cycle_start;
}
static double do_memcpy_gettimeofday(const struct function *r, size_t size, void *src, void *dst)
{
struct timeval tv_start, tv_end, tv_diff;
memcpy_t fn = r->fn.memcpy;
int i;
memcpy_prefault(fn, size, src, dst);
BUG_ON(gettimeofday(&tv_start, NULL));
for (i = 0; i < nr_loops; ++i)
fn(dst, src, size);
BUG_ON(gettimeofday(&tv_end, NULL));
timersub(&tv_end, &tv_start, &tv_diff);
return (double)(((double)size * nr_loops) / timeval2double(&tv_diff));
}
struct function memcpy_functions[] = {
{ .name = "default",
.desc = "Default memcpy() provided by glibc",
.fn.memcpy = memcpy },
#ifdef HAVE_ARCH_X86_64_SUPPORT
# define MEMCPY_FN(_fn, _name, _desc) {.name = _name, .desc = _desc, .fn.memcpy = _fn},
# include "mem-memcpy-x86-64-asm-def.h"
# undef MEMCPY_FN
#endif
{ .name = NULL, }
};
static const char * const bench_mem_memcpy_usage[] = {
"perf bench mem memcpy <options>",
NULL
};
int bench_mem_memcpy(int argc, const char **argv)
{
struct bench_mem_info info = {
.functions = memcpy_functions,
.do_cycles = do_memcpy_cycles,
.do_gettimeofday = do_memcpy_gettimeofday,
.usage = bench_mem_memcpy_usage,
.alloc_src = true,
};
return bench_mem_common(argc, argv, &info);
}
static u64 do_memset_cycles(const struct function *r, size_t size, void *src __maybe_unused, void *dst)
{
u64 cycle_start = 0ULL, cycle_end = 0ULL;
memset_t fn = r->fn.memset;
int i;
/*
* We prefault the freshly allocated memory range here,
* to not measure page fault overhead:
*/
fn(dst, -1, size);
cycle_start = get_cycles();
for (i = 0; i < nr_loops; ++i)
fn(dst, i, size);
cycle_end = get_cycles();
return cycle_end - cycle_start;
}
static double do_memset_gettimeofday(const struct function *r, size_t size, void *src __maybe_unused, void *dst)
{
struct timeval tv_start, tv_end, tv_diff;
memset_t fn = r->fn.memset;
int i;
/*
* We prefault the freshly allocated memory range here,
* to not measure page fault overhead:
*/
fn(dst, -1, size);
BUG_ON(gettimeofday(&tv_start, NULL));
for (i = 0; i < nr_loops; ++i)
fn(dst, i, size);
BUG_ON(gettimeofday(&tv_end, NULL));
timersub(&tv_end, &tv_start, &tv_diff);
return (double)(((double)size * nr_loops) / timeval2double(&tv_diff));
}
static const char * const bench_mem_memset_usage[] = {
"perf bench mem memset <options>",
NULL
};
static const struct function memset_functions[] = {
{ .name = "default",
.desc = "Default memset() provided by glibc",
.fn.memset = memset },
#ifdef HAVE_ARCH_X86_64_SUPPORT
# define MEMSET_FN(_fn, _name, _desc) { .name = _name, .desc = _desc, .fn.memset = _fn },
# include "mem-memset-x86-64-asm-def.h"
# undef MEMSET_FN
#endif
{ .name = NULL, }
};
int bench_mem_memset(int argc, const char **argv)
{
struct bench_mem_info info = {
.functions = memset_functions,
.do_cycles = do_memset_cycles,
.do_gettimeofday = do_memset_gettimeofday,
.usage = bench_mem_memset_usage,
};
return bench_mem_common(argc, argv, &info);
}
/* linux-master | tools/perf/bench/mem-functions.c */
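/*
 * A condensed standalone sketch (not part of the benchmark) of the
 * measurement idiom in do_memcpy_gettimeofday() above: prefault the buffers
 * so page-fault cost is excluded, time nr_loops copies, and report
 * bandwidth. The 1 MB size and 100 loops are arbitrary illustrative values;
 * timersub() is the glibc macro from <sys/time.h>.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

int main(void)
{
	const size_t size = 1 << 20;	/* 1 MB */
	const int nr_loops = 100;
	char *src = calloc(1, size), *dst = calloc(1, size);
	struct timeval tv_start, tv_end, tv_diff;
	double secs;
	int i;

	if (!src || !dst)
		return 1;
	memcpy(dst, src, size);		/* prefault both buffers */
	gettimeofday(&tv_start, NULL);
	for (i = 0; i < nr_loops; i++)
		memcpy(dst, src, size);
	gettimeofday(&tv_end, NULL);
	timersub(&tv_end, &tv_start, &tv_diff);
	secs = tv_diff.tv_sec + tv_diff.tv_usec / 1e6;
	printf("%.2f MB/sec\n", (double)size * nr_loops / secs / (1 << 20));
	free(src);
	free(dst);
	return 0;
}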
/*
*
* syscall.c
*
* syscall: Benchmark for system call performance
*/
#include "../perf.h"
#include "../util/util.h"
#include <subcmd/parse-options.h>
#include "../builtin.h"
#include "bench.h"
#include <stdio.h>
#include <sys/time.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdlib.h>
#ifndef __NR_fork
#define __NR_fork -1
#endif
#define LOOPS_DEFAULT 10000000
static int loops = LOOPS_DEFAULT;
static const struct option options[] = {
OPT_INTEGER('l', "loop", &loops, "Specify number of loops"),
OPT_END()
};
static const char * const bench_syscall_usage[] = {
"perf bench syscall <options>",
NULL
};
static void test_fork(void)
{
pid_t pid = fork();
if (pid < 0) {
fprintf(stderr, "fork failed\n");
exit(1);
} else if (pid == 0) {
exit(0);
} else {
if (waitpid(pid, NULL, 0) < 0) {
fprintf(stderr, "waitpid failed\n");
exit(1);
}
}
}
static void test_execve(void)
{
const char *pathname = "/bin/true";
char *const argv[] = { (char *)pathname, NULL };
pid_t pid = fork();
if (pid < 0) {
fprintf(stderr, "fork failed\n");
exit(1);
} else if (pid == 0) {
execve(pathname, argv, NULL);
fprintf(stderr, "execve /bin/true failed\n");
exit(1);
} else {
if (waitpid(pid, NULL, 0) < 0) {
fprintf(stderr, "waitpid failed\n");
exit(1);
}
}
}
static int bench_syscall_common(int argc, const char **argv, int syscall)
{
struct timeval start, stop, diff;
unsigned long long result_usec = 0;
const char *name = NULL;
int i;
argc = parse_options(argc, argv, options, bench_syscall_usage, 0);
gettimeofday(&start, NULL);
for (i = 0; i < loops; i++) {
switch (syscall) {
case __NR_getppid:
getppid();
break;
case __NR_getpgid:
getpgid(0);
break;
case __NR_fork:
test_fork();
/* Only loop 10000 times to save time */
if (i == 10000)
loops = 10000;
break;
case __NR_execve:
test_execve();
/* Only loop 10000 times to save time */
if (i == 10000)
loops = 10000;
break;
default:
break;
}
}
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
switch (syscall) {
case __NR_getppid:
name = "getppid()";
break;
case __NR_getpgid:
name = "getpgid()";
break;
case __NR_fork:
name = "fork()";
break;
case __NR_execve:
name = "execve()";
break;
default:
break;
}
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
printf("# Executed %'d %s calls\n", loops, name);
result_usec = diff.tv_sec * 1000000;
result_usec += diff.tv_usec;
printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
(unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec/1000));
printf(" %14lf usecs/op\n",
(double)result_usec / (double)loops);
printf(" %'14d ops/sec\n",
(int)((double)loops /
((double)result_usec / (double)1000000)));
break;
case BENCH_FORMAT_SIMPLE:
printf("%lu.%03lu\n",
(unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / 1000));
break;
default:
/* reaching here is a disaster */
fprintf(stderr, "Unknown format:%d\n", bench_format);
exit(1);
break;
}
return 0;
}
int bench_syscall_basic(int argc, const char **argv)
{
return bench_syscall_common(argc, argv, __NR_getppid);
}
int bench_syscall_getpgid(int argc, const char **argv)
{
return bench_syscall_common(argc, argv, __NR_getpgid);
}
int bench_syscall_fork(int argc, const char **argv)
{
return bench_syscall_common(argc, argv, __NR_fork);
}
int bench_syscall_execve(int argc, const char **argv)
{
return bench_syscall_common(argc, argv, __NR_execve);
}
/* linux-master | tools/perf/bench/syscall.c */
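/*
 * The benchmark above amortizes gettimeofday()'s microsecond granularity
 * over millions of calls. A standalone sketch (not part of the benchmark)
 * of the same measurement with clock_gettime(CLOCK_MONOTONIC), which
 * reports nanoseconds:
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	const long loops = 10000000;
	struct timespec ts_start, ts_end;
	long long ns;
	long i;

	clock_gettime(CLOCK_MONOTONIC, &ts_start);
	for (i = 0; i < loops; i++)
		getppid();
	clock_gettime(CLOCK_MONOTONIC, &ts_end);
	ns = (ts_end.tv_sec - ts_start.tv_sec) * 1000000000LL
	   + (ts_end.tv_nsec - ts_start.tv_nsec);
	printf("%.1f ns/call\n", (double)ns / loops);
	return 0;
}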
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Davidlohr Bueso.
*/
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <signal.h>
#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/mman.h>
struct worker {
int tid;
u_int32_t *futex;
pthread_t thread;
unsigned long ops;
};
static u_int32_t global_futex = 0;
static struct worker *worker;
static bool done = false;
static int futex_flag = 0;
static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static struct cond thread_parent, thread_worker;
static struct bench_futex_parameters params = {
.runtime = 10,
};
static const struct option options[] = {
OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"),
OPT_UINTEGER('r', "runtime", ¶ms.runtime, "Specify runtime (in seconds)"),
OPT_BOOLEAN( 'M', "multi", ¶ms.multi, "Use multiple futexes"),
OPT_BOOLEAN( 's', "silent", ¶ms.silent, "Silent mode: do not display data/details"),
OPT_BOOLEAN( 'S', "shared", ¶ms.fshared, "Use shared futexes instead of private ones"),
OPT_BOOLEAN( 'm', "mlockall", ¶ms.mlockall, "Lock all current and future memory"),
OPT_END()
};
static const char * const bench_futex_lock_pi_usage[] = {
"perf bench futex lock-pi <options>",
NULL
};
static void print_summary(void)
{
unsigned long avg = avg_stats(&throughput_stats);
double stddev = stddev_stats(&throughput_stats);
printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
!params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
(int)bench__runtime.tv_sec);
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
/* inform all threads that we're done for the day */
done = true;
gettimeofday(&bench__end, NULL);
timersub(&bench__end, &bench__start, &bench__runtime);
}
static void *workerfn(void *arg)
{
struct worker *w = (struct worker *) arg;
unsigned long ops = w->ops;
mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
cond_signal(&thread_parent);
cond_wait(&thread_worker, &thread_lock);
mutex_unlock(&thread_lock);
do {
int ret;
again:
ret = futex_lock_pi(w->futex, NULL, futex_flag);
if (ret) { /* handle lock acquisition */
if (!params.silent)
warn("thread %d: Could not lock pi-lock for %p (%d)",
w->tid, w->futex, ret);
if (done)
break;
goto again;
}
usleep(1);
ret = futex_unlock_pi(w->futex, futex_flag);
if (ret && !params.silent)
warn("thread %d: Could not unlock pi-lock for %p (%d)",
w->tid, w->futex, ret);
ops++; /* account for thread's share of work */
} while (!done);
w->ops = ops;
return NULL;
}
static void create_threads(struct worker *w, struct perf_cpu_map *cpu)
{
cpu_set_t *cpuset;
unsigned int i;
int nrcpus = perf_cpu_map__nr(cpu);
size_t size;
threads_starting = params.nthreads;
cpuset = CPU_ALLOC(nrcpus);
BUG_ON(!cpuset);
size = CPU_ALLOC_SIZE(nrcpus);
for (i = 0; i < params.nthreads; i++) {
pthread_attr_t thread_attr;
pthread_attr_init(&thread_attr);
worker[i].tid = i;
if (params.multi) {
worker[i].futex = calloc(1, sizeof(u_int32_t));
if (!worker[i].futex)
err(EXIT_FAILURE, "calloc");
} else
worker[i].futex = &global_futex;
CPU_ZERO_S(size, cpuset);
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
}
if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
}
pthread_attr_destroy(&thread_attr);
}
CPU_FREE(cpuset);
}
int bench_futex_lock_pi(int argc, const char **argv)
{
int ret = 0;
unsigned int i;
struct sigaction act;
struct perf_cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
if (argc)
goto err;
cpu = perf_cpu_map__new(NULL);
if (!cpu)
err(EXIT_FAILURE, "calloc");
memset(&act, 0, sizeof(act));
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (params.mlockall) {
if (mlockall(MCL_CURRENT | MCL_FUTURE))
err(EXIT_FAILURE, "mlockall");
}
if (!params.nthreads)
params.nthreads = perf_cpu_map__nr(cpu);
worker = calloc(params.nthreads, sizeof(*worker));
if (!worker)
err(EXIT_FAILURE, "calloc");
if (!params.fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
printf("Run summary [PID %d]: %d threads doing pi lock/unlock pairing for %d secs.\n\n",
getpid(), params.nthreads, params.runtime);
init_stats(&throughput_stats);
mutex_init(&thread_lock);
cond_init(&thread_parent);
cond_init(&thread_worker);
threads_starting = params.nthreads;
gettimeofday(&bench__start, NULL);
create_threads(worker, cpu);
mutex_lock(&thread_lock);
while (threads_starting)
cond_wait(&thread_parent, &thread_lock);
cond_broadcast(&thread_worker);
mutex_unlock(&thread_lock);
sleep(params.runtime);
toggle_done(0, NULL, NULL);
for (i = 0; i < params.nthreads; i++) {
ret = pthread_join(worker[i].thread, NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
/* cleanup & report results */
cond_destroy(&thread_parent);
cond_destroy(&thread_worker);
mutex_destroy(&thread_lock);
for (i = 0; i < params.nthreads; i++) {
unsigned long t = bench__runtime.tv_sec > 0 ?
worker[i].ops / bench__runtime.tv_sec : 0;
update_stats(&throughput_stats, t);
if (!params.silent)
printf("[thread %3d] futex: %p [ %ld ops/sec ]\n",
worker[i].tid, worker[i].futex, t);
if (params.multi)
zfree(&worker[i].futex);
}
print_summary();
free(worker);
perf_cpu_map__put(cpu);
return ret;
err:
usage_with_options(bench_futex_lock_pi_usage, options);
exit(EXIT_FAILURE);
}
/* linux-master | tools/perf/bench/futex-lock-pi.c */
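/*
 * FUTEX_LOCK_PI only enters the kernel on contention; the userspace fast
 * path is a compare-and-swap of the caller's TID into the futex word. A
 * standalone sketch of that fast-path/slow-path split (pi_lock()/pi_unlock()
 * are illustrative names; the benchmark's futex_lock_pi() helper always
 * takes the syscall, since the syscall is what it wants to measure):
 */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static int pi_lock(uint32_t *ftx)
{
	uint32_t zero = 0;
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	/* Fast path: a 0 -> TID transition means we now own the lock. */
	if (__atomic_compare_exchange_n(ftx, &zero, tid, 0,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return 0;
	/* Slow path: the kernel queues us with priority inheritance. */
	return syscall(SYS_futex, ftx, FUTEX_LOCK_PI_PRIVATE, 0, NULL, NULL, 0);
}

static int pi_unlock(uint32_t *ftx)
{
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	/* Fast path: TID -> 0 succeeds only if no waiters are queued. */
	if (__atomic_compare_exchange_n(ftx, &tid, 0, 0,
					__ATOMIC_RELEASE, __ATOMIC_RELAXED))
		return 0;
	return syscall(SYS_futex, ftx, FUTEX_UNLOCK_PI_PRIVATE, 0, NULL, NULL, 0);
}

int main(void)
{
	static uint32_t ftx;

	pi_lock(&ftx);	/* uncontended: both calls stay in userspace */
	pi_unlock(&ftx);
	return 0;
}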
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Davidlohr Bueso <[email protected]>
*
* futex-hash: Stress the hell out of the Linux kernel futex uaddr hashing.
*
* This program is particularly useful for measuring the kernel's futex hash
* table/function implementation. In order for it to make sense, use with as
* many threads and futexes as possible.
*/
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
#include "futex.h"
#include <err.h>
static bool done = false;
static int futex_flag = 0;
struct timeval bench__start, bench__end, bench__runtime;
static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static struct cond thread_parent, thread_worker;
struct worker {
int tid;
u_int32_t *futex;
pthread_t thread;
unsigned long ops;
};
static struct bench_futex_parameters params = {
.nfutexes = 1024,
.runtime = 10,
};
static const struct option options[] = {
OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"),
OPT_UINTEGER('r', "runtime", ¶ms.runtime, "Specify runtime (in seconds)"),
OPT_UINTEGER('f', "futexes", ¶ms.nfutexes, "Specify amount of futexes per threads"),
OPT_BOOLEAN( 's', "silent", ¶ms.silent, "Silent mode: do not display data/details"),
OPT_BOOLEAN( 'S', "shared", ¶ms.fshared, "Use shared futexes instead of private ones"),
OPT_BOOLEAN( 'm', "mlockall", ¶ms.mlockall, "Lock all current and future memory"),
OPT_END()
};
static const char * const bench_futex_hash_usage[] = {
"perf bench futex hash <options>",
NULL
};
static void *workerfn(void *arg)
{
int ret;
struct worker *w = (struct worker *) arg;
unsigned int i;
unsigned long ops = w->ops; /* avoid cacheline bouncing */
mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
cond_signal(&thread_parent);
cond_wait(&thread_worker, &thread_lock);
mutex_unlock(&thread_lock);
do {
for (i = 0; i < params.nfutexes; i++, ops++) {
/*
* We want the futex calls to fail in order to stress
* the hashing of uaddr and not measure other steps,
* such as internal waitqueue handling, thus enlarging
* the critical region protected by hb->lock.
*/
ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
if (!params.silent &&
(!ret || (errno != EAGAIN && errno != EWOULDBLOCK)))
warn("Unexpected futex return call");
}
} while (!done);
w->ops = ops;
return NULL;
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
/* inform all threads that we're done for the day */
done = true;
gettimeofday(&bench__end, NULL);
timersub(&bench__end, &bench__start, &bench__runtime);
}
static void print_summary(void)
{
unsigned long avg = avg_stats(&throughput_stats);
double stddev = stddev_stats(&throughput_stats);
printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
!params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
(int)bench__runtime.tv_sec);
}
int bench_futex_hash(int argc, const char **argv)
{
int ret = 0;
cpu_set_t *cpuset;
struct sigaction act;
unsigned int i;
pthread_attr_t thread_attr;
struct worker *worker = NULL;
struct perf_cpu_map *cpu;
int nrcpus;
size_t size;
argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
if (argc) {
usage_with_options(bench_futex_hash_usage, options);
exit(EXIT_FAILURE);
}
cpu = perf_cpu_map__new(NULL);
if (!cpu)
goto errmem;
memset(&act, 0, sizeof(act));
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (params.mlockall) {
if (mlockall(MCL_CURRENT | MCL_FUTURE))
err(EXIT_FAILURE, "mlockall");
}
if (!params.nthreads) /* default to the number of CPUs */
params.nthreads = perf_cpu_map__nr(cpu);
worker = calloc(params.nthreads, sizeof(*worker));
if (!worker)
goto errmem;
if (!params.fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
getpid(), params.nthreads, params.nfutexes, params.fshared ? "shared":"private", params.runtime);
init_stats(&throughput_stats);
mutex_init(&thread_lock);
cond_init(&thread_parent);
cond_init(&thread_worker);
threads_starting = params.nthreads;
pthread_attr_init(&thread_attr);
gettimeofday(&bench__start, NULL);
nrcpus = perf_cpu_map__nr(cpu);
cpuset = CPU_ALLOC(nrcpus);
BUG_ON(!cpuset);
size = CPU_ALLOC_SIZE(nrcpus);
for (i = 0; i < params.nthreads; i++) {
worker[i].tid = i;
worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex));
if (!worker[i].futex)
goto errmem;
CPU_ZERO_S(size, cpuset);
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
if (ret) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
}
ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
(void *)(struct worker *) &worker[i]);
if (ret) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
}
}
CPU_FREE(cpuset);
pthread_attr_destroy(&thread_attr);
mutex_lock(&thread_lock);
while (threads_starting)
cond_wait(&thread_parent, &thread_lock);
cond_broadcast(&thread_worker);
mutex_unlock(&thread_lock);
sleep(params.runtime);
toggle_done(0, NULL, NULL);
for (i = 0; i < params.nthreads; i++) {
ret = pthread_join(worker[i].thread, NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
/* cleanup & report results */
cond_destroy(&thread_parent);
cond_destroy(&thread_worker);
mutex_destroy(&thread_lock);
for (i = 0; i < params.nthreads; i++) {
unsigned long t = bench__runtime.tv_sec > 0 ?
worker[i].ops / bench__runtime.tv_sec : 0;
update_stats(&throughput_stats, t);
if (!params.silent) {
if (params.nfutexes == 1)
printf("[thread %2d] futex: %p [ %ld ops/sec ]\n",
worker[i].tid, &worker[i].futex[0], t);
else
printf("[thread %2d] futexes: %p ... %p [ %ld ops/sec ]\n",
worker[i].tid, &worker[i].futex[0],
&worker[i].futex[params.nfutexes-1], t);
}
zfree(&worker[i].futex);
}
print_summary();
free(worker);
perf_cpu_map__put(cpu);
return ret;
errmem:
err(EXIT_FAILURE, "calloc");
}
/* linux-master | tools/perf/bench/futex-hash.c */
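/*
 * A standalone sketch (not part of the benchmark) of the deliberately
 * failing wait used in workerfn() above: FUTEX_WAIT compares *uaddr against
 * the expected value under the hash-bucket lock and returns -1/EAGAIN on
 * mismatch, so the call exercises the uaddr hashing without ever sleeping.
 */
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	uint32_t ftx = 0;
	long ret;

	/* Expected value 1234 != 0, so this fails fast with EAGAIN. */
	ret = syscall(SYS_futex, &ftx, FUTEX_WAIT_PRIVATE, 1234,
		      NULL, NULL, 0);
	printf("ret=%ld errno=%d (%s)\n", ret, errno,
	       errno == EAGAIN ? "EAGAIN, as expected" : "unexpected");
	return 0;
}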
// SPDX-License-Identifier: GPL-2.0
/*
*
* sched-messaging.c
*
* messaging: Benchmark for scheduler and IPC mechanisms
*
* Based on hackbench by Rusty Russell <[email protected]>
* Ported to perf by Hitoshi Mitake <[email protected]>
*
*/
#include <subcmd/parse-options.h>
#include "bench.h"
/* Test groups of 20 processes spraying to 20 receivers */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <poll.h>
#include <limits.h>
#include <err.h>
#include <linux/list.h>
#include <linux/time64.h>
#define DATASIZE 100
static bool use_pipes = false;
static unsigned int nr_loops = 100;
static bool thread_mode = false;
static unsigned int num_groups = 10;
static struct list_head sender_contexts = LIST_HEAD_INIT(sender_contexts);
static struct list_head receiver_contexts = LIST_HEAD_INIT(receiver_contexts);
struct sender_context {
struct list_head list;
unsigned int num_fds;
int ready_out;
int wakefd;
int out_fds[];
};
struct receiver_context {
struct list_head list;
unsigned int num_packets;
int in_fds[2];
int ready_out;
int wakefd;
};
static void fdpair(int fds[2])
{
if (use_pipes) {
if (pipe(fds) == 0)
return;
} else {
if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0)
return;
}
err(EXIT_FAILURE, use_pipes ? "pipe()" : "socketpair()");
}
/* Block until we're ready to go */
static void ready(int ready_out, int wakefd)
{
struct pollfd pollfd = { .fd = wakefd, .events = POLLIN };
/* Tell them we're ready. */
if (write(ready_out, "R", 1) != 1)
err(EXIT_FAILURE, "CLIENT: ready write");
/* Wait for "GO" signal */
if (poll(&pollfd, 1, -1) != 1)
err(EXIT_FAILURE, "poll");
}
/* Sender sprays nr_loops messages down each file descriptor */
static void *sender(struct sender_context *ctx)
{
char data[DATASIZE];
unsigned int i, j;
ready(ctx->ready_out, ctx->wakefd);
memset(data, 'S', sizeof(data));
/* Now pump to every receiver. */
for (i = 0; i < nr_loops; i++) {
for (j = 0; j < ctx->num_fds; j++) {
int ret, done = 0;
again:
ret = write(ctx->out_fds[j], data + done,
sizeof(data)-done);
if (ret < 0)
err(EXIT_FAILURE, "SENDER: write");
done += ret;
if (done < DATASIZE)
goto again;
}
}
return NULL;
}
/* One receiver per fd */
static void *receiver(struct receiver_context* ctx)
{
unsigned int i;
if (!thread_mode)
close(ctx->in_fds[1]);
/* Wait for start... */
ready(ctx->ready_out, ctx->wakefd);
/* Receive them all */
for (i = 0; i < ctx->num_packets; i++) {
char data[DATASIZE];
int ret, done = 0;
again:
ret = read(ctx->in_fds[0], data + done, DATASIZE - done);
if (ret < 0)
err(EXIT_FAILURE, "SERVER: read");
done += ret;
if (done < DATASIZE)
goto again;
}
return NULL;
}
static pthread_t create_worker(void *ctx, void *(*func)(void *))
{
pthread_attr_t attr;
pthread_t childid;
int ret;
if (!thread_mode) {
/* process mode */
/* Fork the receiver. */
switch (fork()) {
case -1:
err(EXIT_FAILURE, "fork()");
break;
case 0:
(*func) (ctx);
exit(0);
break;
default:
break;
}
return (pthread_t)0;
}
if (pthread_attr_init(&attr) != 0)
err(EXIT_FAILURE, "pthread_attr_init:");
#ifndef __ia64__
if (pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0)
err(EXIT_FAILURE, "pthread_attr_setstacksize");
#endif
ret = pthread_create(&childid, &attr, func, ctx);
if (ret != 0)
err(EXIT_FAILURE, "pthread_create failed");
pthread_attr_destroy(&attr);
return childid;
}
static void reap_worker(pthread_t id)
{
int proc_status;
void *thread_status;
if (!thread_mode) {
/* process mode */
wait(&proc_status);
if (!WIFEXITED(proc_status))
exit(1);
} else {
pthread_join(id, &thread_status);
}
}
/* One group of senders and receivers */
static unsigned int group(pthread_t *pth,
unsigned int num_fds,
int ready_out,
int wakefd)
{
unsigned int i;
struct sender_context *snd_ctx = malloc(sizeof(struct sender_context)
+ num_fds * sizeof(int));
if (!snd_ctx)
err(EXIT_FAILURE, "malloc()");
list_add(&snd_ctx->list, &sender_contexts);
for (i = 0; i < num_fds; i++) {
int fds[2];
struct receiver_context *ctx = malloc(sizeof(*ctx));
if (!ctx)
err(EXIT_FAILURE, "malloc()");
list_add(&ctx->list, &receiver_contexts);
/* Create the pipe between client and server */
fdpair(fds);
ctx->num_packets = num_fds * nr_loops;
ctx->in_fds[0] = fds[0];
ctx->in_fds[1] = fds[1];
ctx->ready_out = ready_out;
ctx->wakefd = wakefd;
pth[i] = create_worker(ctx, (void *)receiver);
snd_ctx->out_fds[i] = fds[1];
if (!thread_mode)
close(fds[0]);
}
/* Now we have all the fds, fork the senders */
for (i = 0; i < num_fds; i++) {
snd_ctx->ready_out = ready_out;
snd_ctx->wakefd = wakefd;
snd_ctx->num_fds = num_fds;
pth[num_fds+i] = create_worker(snd_ctx, (void *)sender);
}
/* Close the fds we have left */
if (!thread_mode)
for (i = 0; i < num_fds; i++)
close(snd_ctx->out_fds[i]);
/* Return number of children to reap */
return num_fds * 2;
}
static const struct option options[] = {
OPT_BOOLEAN('p', "pipe", &use_pipes,
"Use pipe() instead of socketpair()"),
OPT_BOOLEAN('t', "thread", &thread_mode,
"Be multi thread instead of multi process"),
OPT_UINTEGER('g', "group", &num_groups, "Specify number of groups"),
OPT_UINTEGER('l', "nr_loops", &nr_loops, "Specify the number of loops to run (default: 100)"),
OPT_END()
};
static const char * const bench_sched_message_usage[] = {
"perf bench sched messaging <options>",
NULL
};
int bench_sched_messaging(int argc, const char **argv)
{
unsigned int i, total_children;
struct timeval start, stop, diff;
unsigned int num_fds = 20;
int readyfds[2], wakefds[2];
char dummy;
pthread_t *pth_tab;
struct sender_context *pos, *n;
argc = parse_options(argc, argv, options,
bench_sched_message_usage, 0);
pth_tab = malloc(num_fds * 2 * num_groups * sizeof(pthread_t));
if (!pth_tab)
err(EXIT_FAILURE, "main:malloc()");
fdpair(readyfds);
fdpair(wakefds);
total_children = 0;
for (i = 0; i < num_groups; i++)
total_children += group(pth_tab+total_children, num_fds,
readyfds[1], wakefds[0]);
/* Wait for everyone to be ready */
for (i = 0; i < total_children; i++)
if (read(readyfds[0], &dummy, 1) != 1)
err(EXIT_FAILURE, "Reading for readyfds");
gettimeofday(&start, NULL);
/* Kick them off */
if (write(wakefds[1], &dummy, 1) != 1)
err(EXIT_FAILURE, "Writing to start them");
/* Reap them all */
for (i = 0; i < total_children; i++)
reap_worker(pth_tab[i]);
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
printf("# %d sender and receiver %s per group\n",
num_fds, thread_mode ? "threads" : "processes");
printf("# %d groups == %d %s run\n\n",
num_groups, num_groups * 2 * num_fds,
thread_mode ? "threads" : "processes");
printf(" %14s: %lu.%03lu [sec]\n", "Total time",
(unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / USEC_PER_MSEC));
break;
case BENCH_FORMAT_SIMPLE:
printf("%lu.%03lu\n", (unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / USEC_PER_MSEC));
break;
default:
/* reaching here is a disaster */
fprintf(stderr, "Unknown format:%d\n", bench_format);
exit(1);
break;
}
free(pth_tab);
list_for_each_entry_safe(pos, n, &sender_contexts, list) {
list_del_init(&pos->list);
free(pos);
}
list_for_each_entry_safe(pos, n, &receiver_contexts, list) {
list_del_init(&pos->list);
free(pos);
}
return 0;
}
/* linux-master | tools/perf/bench/sched-messaging.c */
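/*
 * A minimal standalone sketch (not part of the benchmark) of one
 * sender/receiver pair as built by group() above, connected with
 * socketpair(). The real code loops until all DATASIZE bytes have moved and
 * multiplies this pattern across groups of 20 senders and 20 receivers; a
 * single read() suffices here only because the message is small.
 */
#include <err.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char buf[100];
	pid_t pid;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
		err(1, "socketpair");
	pid = fork();
	if (pid < 0)
		err(1, "fork");
	if (pid == 0) {			/* receiver */
		close(fds[1]);
		ssize_t n = read(fds[0], buf, sizeof(buf));
		printf("received %zd bytes\n", n);
		_exit(0);
	}
	close(fds[0]);			/* sender */
	memset(buf, 'S', sizeof(buf));
	if (write(fds[1], buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		err(1, "write");
	wait(NULL);
	return 0;
}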
// SPDX-License-Identifier: GPL-2.0
/*
* Benchmark scanning sysfs files for PMU information.
*
* Copyright 2023 Google LLC.
*/
#include <stdio.h>
#include "bench.h"
#include "util/debug.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include "util/stat.h"
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/time64.h>
#include <subcmd/parse-options.h>
static unsigned int iterations = 100;
struct pmu_scan_result {
char *name;
int nr_aliases;
int nr_formats;
int nr_caps;
bool is_core;
};
static const struct option options[] = {
OPT_UINTEGER('i', "iterations", &iterations,
"Number of iterations used to compute average"),
OPT_END()
};
static const char *const bench_usage[] = {
"perf bench internals pmu-scan <options>",
NULL
};
static int nr_pmus;
static struct pmu_scan_result *results;
static int save_result(void)
{
struct perf_pmu *pmu = NULL;
struct list_head *list;
struct pmu_scan_result *r;
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
r = realloc(results, (nr_pmus + 1) * sizeof(*r));
if (r == NULL)
return -ENOMEM;
results = r;
r = results + nr_pmus;
r->name = strdup(pmu->name);
r->is_core = pmu->is_core;
r->nr_caps = pmu->nr_caps;
r->nr_aliases = perf_pmu__num_events(pmu);
r->nr_formats = 0;
list_for_each(list, &pmu->format)
r->nr_formats++;
pr_debug("pmu[%d] name=%s, nr_caps=%d, nr_aliases=%d, nr_formats=%d\n",
nr_pmus, r->name, r->nr_caps, r->nr_aliases, r->nr_formats);
nr_pmus++;
}
perf_pmus__destroy();
return 0;
}
static int check_result(bool core_only)
{
struct pmu_scan_result *r;
struct perf_pmu *pmu;
struct list_head *list;
int nr;
for (int i = 0; i < nr_pmus; i++) {
r = &results[i];
if (core_only && !r->is_core)
continue;
pmu = perf_pmus__find(r->name);
if (pmu == NULL) {
pr_err("Cannot find PMU %s\n", r->name);
return -1;
}
if (pmu->nr_caps != (u32)r->nr_caps) {
pr_err("Unmatched number of event caps in %s: expect %d vs got %d\n",
pmu->name, r->nr_caps, pmu->nr_caps);
return -1;
}
nr = perf_pmu__num_events(pmu);
if (nr != r->nr_aliases) {
pr_err("Unmatched number of event aliases in %s: expect %d vs got %d\n",
pmu->name, r->nr_aliases, nr);
return -1;
}
nr = 0;
list_for_each(list, &pmu->format)
nr++;
if (nr != r->nr_formats) {
pr_err("Unmatched number of event formats in %s: expect %d vs got %d\n",
pmu->name, r->nr_formats, nr);
return -1;
}
}
return 0;
}
static void delete_result(void)
{
for (int i = 0; i < nr_pmus; i++)
free(results[i].name);
free(results);
results = NULL;
nr_pmus = 0;
}
static int run_pmu_scan(void)
{
struct stats stats;
struct timeval start, end, diff;
double time_average, time_stddev;
u64 runtime_us;
int ret;
init_stats(&stats);
pr_info("Computing performance of sysfs PMU event scan for %u times\n",
iterations);
if (save_result() < 0) {
pr_err("Failed to initialize PMU scan result\n");
return -1;
}
for (int j = 0; j < 2; j++) {
bool core_only = (j == 0);
for (unsigned int i = 0; i < iterations; i++) {
gettimeofday(&start, NULL);
if (core_only)
perf_pmus__scan_core(NULL);
else
perf_pmus__scan(NULL);
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
update_stats(&stats, runtime_us);
ret = check_result(core_only);
perf_pmus__destroy();
if (ret < 0)
break;
}
time_average = avg_stats(&stats);
time_stddev = stddev_stats(&stats);
pr_info(" Average%s PMU scanning took: %.3f usec (+- %.3f usec)\n",
core_only ? " core" : "", time_average, time_stddev);
}
delete_result();
return 0;
}
int bench_pmu_scan(int argc, const char **argv)
{
int err = 0;
argc = parse_options(argc, argv, options, bench_usage, 0);
if (argc) {
usage_with_options(bench_usage, options);
exit(EXIT_FAILURE);
}
err = run_pmu_scan();
return err;
}
/* linux-master | tools/perf/bench/pmu-scan.c */
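/*
 * The PMUs scanned by the benchmark above are exposed under
 * /sys/bus/event_source/devices. A standalone sketch (not part of the
 * benchmark) that lists them directly:
 */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *dir = opendir("/sys/bus/event_source/devices");
	struct dirent *d;

	if (!dir) {
		perror("opendir");
		return 1;
	}
	while ((d = readdir(dir)) != NULL) {
		if (d->d_name[0] == '.')	/* skip . and .. */
			continue;
		printf("pmu: %s\n", d->d_name);
	}
	closedir(dir);
	return 0;
}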
// SPDX-License-Identifier: GPL-2.0
/*
* numa.c
*
* numa: Simulate NUMA-sensitive workloads and measure their NUMA performance
*/
#include <inttypes.h>
#include <subcmd/parse-options.h>
#include "../util/cloexec.h"
#include "bench.h"
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <assert.h>
#include <debug.h>
#include <malloc.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <linux/numa.h>
#include <linux/zalloc.h>
#include "../util/header.h"
#include "../util/mutex.h"
#include <numa.h>
#include <numaif.h>
#ifndef RUSAGE_THREAD
# define RUSAGE_THREAD 1
#endif
/*
* Regular printout to the terminal, suppressed if -q is specified:
*/
#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
/*
* Debug printf:
*/
#undef dprintf
#define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
struct thread_data {
int curr_cpu;
cpu_set_t *bind_cpumask;
int bind_node;
u8 *process_data;
int process_nr;
int thread_nr;
int task_nr;
unsigned int loops_done;
u64 val;
u64 runtime_ns;
u64 system_time_ns;
u64 user_time_ns;
double speed_gbs;
struct mutex *process_lock;
};
/* Parameters set by options: */
struct params {
/* Startup synchronization: */
bool serialize_startup;
/* Task hierarchy: */
int nr_proc;
int nr_threads;
/* Working set sizes: */
const char *mb_global_str;
const char *mb_proc_str;
const char *mb_proc_locked_str;
const char *mb_thread_str;
double mb_global;
double mb_proc;
double mb_proc_locked;
double mb_thread;
/* Access patterns to the working set: */
bool data_reads;
bool data_writes;
bool data_backwards;
bool data_zero_memset;
bool data_rand_walk;
u32 nr_loops;
u32 nr_secs;
u32 sleep_usecs;
/* Working set initialization: */
bool init_zero;
bool init_random;
bool init_cpu0;
/* Misc options: */
int show_details;
int run_all;
int thp;
long bytes_global;
long bytes_process;
long bytes_process_locked;
long bytes_thread;
int nr_tasks;
bool show_convergence;
bool measure_convergence;
int perturb_secs;
int nr_cpus;
int nr_nodes;
/* Affinity options -C and -N: */
char *cpu_list_str;
char *node_list_str;
};
/* Global, read-writable area, accessible to all processes and threads: */
struct global_info {
u8 *data;
struct mutex startup_mutex;
struct cond startup_cond;
int nr_tasks_started;
struct mutex start_work_mutex;
struct cond start_work_cond;
int nr_tasks_working;
bool start_work;
struct mutex stop_work_mutex;
u64 bytes_done;
struct thread_data *threads;
/* Convergence latency measurement: */
bool all_converged;
bool stop_work;
int print_once;
struct params p;
};
static struct global_info *g = NULL;
static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);
struct params p0;
static const struct option options[] = {
OPT_INTEGER('p', "nr_proc" , &p0.nr_proc, "number of processes"),
OPT_INTEGER('t', "nr_threads" , &p0.nr_threads, "number of threads per process"),
OPT_STRING('G', "mb_global" , &p0.mb_global_str, "MB", "global memory (MBs)"),
OPT_STRING('P', "mb_proc" , &p0.mb_proc_str, "MB", "process memory (MBs)"),
OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
OPT_STRING('T', "mb_thread" , &p0.mb_thread_str, "MB", "thread memory (MBs)"),
OPT_UINTEGER('l', "nr_loops" , &p0.nr_loops, "max number of loops to run (default: unlimited)"),
OPT_UINTEGER('s', "nr_secs" , &p0.nr_secs, "max number of seconds to run (default: 5 secs)"),
OPT_UINTEGER('u', "usleep" , &p0.sleep_usecs, "usecs to sleep per loop iteration"),
OPT_BOOLEAN('R', "data_reads" , &p0.data_reads, "access the data via reads (can be mixed with -W)"),
OPT_BOOLEAN('W', "data_writes" , &p0.data_writes, "access the data via writes (can be mixed with -R)"),
OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards, "access the data backwards as well"),
OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk, "access the data with random (32bit LFSR) walk"),
OPT_BOOLEAN('z', "init_zero" , &p0.init_zero, "bzero the initial allocations"),
OPT_BOOLEAN('I', "init_random" , &p0.init_random, "randomize the contents of the initial allocations"),
OPT_BOOLEAN('0', "init_cpu0" , &p0.init_cpu0, "do the initial allocations on CPU#0"),
OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs, "perturb thread 0/0 every X secs, to test convergence stability"),
OPT_INCR ('d', "show_details" , &p0.show_details, "Show details"),
OPT_INCR ('a', "all" , &p0.run_all, "Run all tests in the suite"),
OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details, "
"convergence is reached when each process (all its threads) is running on a single NUMA node."),
OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
OPT_BOOLEAN('q', "quiet" , &quiet,
"quiet mode (do not show any warnings or messages)"),
OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
/* Special option string parsing callbacks: */
OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]",
"bind the first N tasks to these specific cpus (the rest is unbound)",
parse_cpus_opt),
OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]",
"bind the first N tasks to these specific memory nodes (the rest is unbound)",
parse_nodes_opt),
OPT_END()
};
static const char * const bench_numa_usage[] = {
"perf bench numa <options>",
NULL
};
static const char * const numa_usage[] = {
"perf bench numa mem [<options>]",
NULL
};
/*
* Get the number of numa nodes present.
*/
static int nr_numa_nodes(void)
{
int i, nr_nodes = 0;
for (i = 0; i < g->p.nr_nodes; i++) {
if (numa_bitmask_isbitset(numa_nodes_ptr, i))
nr_nodes++;
}
return nr_nodes;
}
/*
* Check whether a given numa node is present.
*/
static int is_node_present(int node)
{
return numa_bitmask_isbitset(numa_nodes_ptr, node);
}
/*
* Check whether a given numa node has cpus.
*/
static bool node_has_cpus(int node)
{
struct bitmask *cpumask = numa_allocate_cpumask();
bool ret = false; /* fall back to nocpus */
int cpu;
BUG_ON(!cpumask);
if (!numa_node_to_cpus(node, cpumask)) {
for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
if (numa_bitmask_isbitset(cpumask, cpu)) {
ret = true;
break;
}
}
}
numa_free_cpumask(cpumask);
return ret;
}
static cpu_set_t *bind_to_cpu(int target_cpu)
{
int nrcpus = numa_num_possible_cpus();
cpu_set_t *orig_mask, *mask;
size_t size;
orig_mask = CPU_ALLOC(nrcpus);
BUG_ON(!orig_mask);
size = CPU_ALLOC_SIZE(nrcpus);
CPU_ZERO_S(size, orig_mask);
if (sched_getaffinity(0, size, orig_mask))
goto err_out;
mask = CPU_ALLOC(nrcpus);
if (!mask)
goto err_out;
CPU_ZERO_S(size, mask);
if (target_cpu == -1) {
int cpu;
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
CPU_SET_S(cpu, size, mask);
} else {
if (target_cpu < 0 || target_cpu >= g->p.nr_cpus)
goto err;
CPU_SET_S(target_cpu, size, mask);
}
if (sched_setaffinity(0, size, mask))
goto err;
return orig_mask;
err:
CPU_FREE(mask);
err_out:
CPU_FREE(orig_mask);
/* BUG_ON due to failure in allocation of orig_mask/mask */
BUG_ON(-1);
return NULL;
}
static cpu_set_t *bind_to_node(int target_node)
{
int nrcpus = numa_num_possible_cpus();
size_t size;
cpu_set_t *orig_mask, *mask;
int cpu;
orig_mask = CPU_ALLOC(nrcpus);
BUG_ON(!orig_mask);
size = CPU_ALLOC_SIZE(nrcpus);
CPU_ZERO_S(size, orig_mask);
if (sched_getaffinity(0, size, orig_mask))
goto err_out;
mask = CPU_ALLOC(nrcpus);
if (!mask)
goto err_out;
CPU_ZERO_S(size, mask);
if (target_node == NUMA_NO_NODE) {
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
CPU_SET_S(cpu, size, mask);
} else {
struct bitmask *cpumask = numa_allocate_cpumask();
if (!cpumask)
goto err;
if (!numa_node_to_cpus(target_node, cpumask)) {
for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
if (numa_bitmask_isbitset(cpumask, cpu))
CPU_SET_S(cpu, size, mask);
}
}
numa_free_cpumask(cpumask);
}
if (sched_setaffinity(0, size, mask))
goto err;
return orig_mask;
err:
CPU_FREE(mask);
err_out:
CPU_FREE(orig_mask);
/* BUG_ON due to failure in allocation of orig_mask/mask */
BUG_ON(-1);
return NULL;
}
static void bind_to_cpumask(cpu_set_t *mask)
{
int ret;
size_t size = CPU_ALLOC_SIZE(numa_num_possible_cpus());
ret = sched_setaffinity(0, size, mask);
if (ret) {
CPU_FREE(mask);
BUG_ON(ret);
}
}
static void mempol_restore(void)
{
int ret;
ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);
BUG_ON(ret);
}
static void bind_to_memnode(int node)
{
struct bitmask *node_mask;
int ret;
if (node == NUMA_NO_NODE)
return;
node_mask = numa_allocate_nodemask();
BUG_ON(!node_mask);
numa_bitmask_clearall(node_mask);
numa_bitmask_setbit(node_mask, node);
ret = set_mempolicy(MPOL_BIND, node_mask->maskp, node_mask->size + 1);
dprintf("binding to node %d, mask: %016lx => %d\n", node, *node_mask->maskp, ret);
numa_bitmask_free(node_mask);
BUG_ON(ret);
}
#define HPSIZE (2*1024*1024)
#define set_taskname(fmt...) \
do { \
char name[20]; \
\
snprintf(name, 20, fmt); \
prctl(PR_SET_NAME, name); \
} while (0)
static u8 *alloc_data(ssize_t bytes0, int map_flags,
int init_zero, int init_cpu0, int thp, int init_random)
{
cpu_set_t *orig_mask = NULL;
ssize_t bytes;
u8 *buf;
int ret;
if (!bytes0)
return NULL;
/* Allocate and initialize all memory on CPU#0: */
if (init_cpu0) {
int node = numa_node_of_cpu(0);
orig_mask = bind_to_node(node);
bind_to_memnode(node);
}
bytes = bytes0 + HPSIZE;
buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0);
BUG_ON(buf == (void *)-1);
if (map_flags == MAP_PRIVATE) {
if (thp > 0) {
ret = madvise(buf, bytes, MADV_HUGEPAGE);
if (ret && !g->print_once) {
g->print_once = 1;
printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n");
}
}
if (thp < 0) {
ret = madvise(buf, bytes, MADV_NOHUGEPAGE);
if (ret && !g->print_once) {
g->print_once = 1;
printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n");
}
}
}
if (init_zero) {
bzero(buf, bytes);
} else {
/* Initialize random contents, different in each word: */
if (init_random) {
u64 *wbuf = (void *)buf;
long off = rand();
long i;
for (i = 0; i < bytes/8; i++)
wbuf[i] = i + off;
}
}
/* Align to 2MB boundary: */
buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1));
/* Restore affinity: */
if (init_cpu0) {
bind_to_cpumask(orig_mask);
CPU_FREE(orig_mask);
mempol_restore();
}
return buf;
}
static void free_data(void *data, ssize_t bytes)
{
int ret;
if (!data)
return;
ret = munmap(data, bytes);
BUG_ON(ret);
}
/*
* Create a shared memory buffer that can be shared between processes, zeroed:
*/
static void * zalloc_shared_data(ssize_t bytes)
{
return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0, g->p.thp, g->p.init_random);
}
/*
* Create a shared memory buffer that can be shared between processes:
*/
static void * setup_shared_data(ssize_t bytes)
{
return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0, g->p.thp, g->p.init_random);
}
/*
* Allocate process-local memory - this will either be shared between
* threads of this process, or only be accessed by this thread:
*/
static void * setup_private_data(ssize_t bytes)
{
return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0, g->p.thp, g->p.init_random);
}
static int parse_cpu_list(const char *arg)
{
p0.cpu_list_str = strdup(arg);
dprintf("got CPU list: {%s}\n", p0.cpu_list_str);
return 0;
}
static int parse_setup_cpu_list(void)
{
struct thread_data *td;
char *str0, *str;
int t;
if (!g->p.cpu_list_str)
return 0;
dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
str0 = str = strdup(g->p.cpu_list_str);
t = 0;
BUG_ON(!str);
tprintf("# binding tasks to CPUs:\n");
tprintf("# ");
while (true) {
int bind_cpu, bind_cpu_0, bind_cpu_1;
char *tok, *tok_end, *tok_step, *tok_len, *tok_mul;
int bind_len;
int step;
int mul;
tok = strsep(&str, ",");
if (!tok)
break;
tok_end = strstr(tok, "-");
dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
if (!tok_end) {
/* Single CPU specified: */
bind_cpu_0 = bind_cpu_1 = atol(tok);
} else {
/* CPU range specified (for example: "5-11"): */
bind_cpu_0 = atol(tok);
bind_cpu_1 = atol(tok_end + 1);
}
step = 1;
tok_step = strstr(tok, "#");
if (tok_step) {
step = atol(tok_step + 1);
BUG_ON(step <= 0 || step >= g->p.nr_cpus);
}
/*
* Mask length.
* Eg: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4',
* where the _4 means the next 4 CPUs are allowed.
*/
bind_len = 1;
tok_len = strstr(tok, "_");
if (tok_len) {
bind_len = atol(tok_len + 1);
BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
}
/* Multiplier shortcut: "0x8" is a shortcut for "0,0,0,0,0,0,0,0" */
mul = 1;
tok_mul = strstr(tok, "x");
if (tok_mul) {
mul = atol(tok_mul + 1);
BUG_ON(mul <= 0);
}
dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);
if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) {
printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus);
return -1;
}
if (is_cpu_online(bind_cpu_0) != 1 || is_cpu_online(bind_cpu_1) != 1) {
printf("\nTest not applicable, bind_cpu_0 or bind_cpu_1 is offline\n");
return -1;
}
BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
BUG_ON(bind_cpu_0 > bind_cpu_1);
for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
size_t size = CPU_ALLOC_SIZE(g->p.nr_cpus);
int i;
for (i = 0; i < mul; i++) {
int cpu;
if (t >= g->p.nr_tasks) {
printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu);
goto out;
}
td = g->threads + t;
if (t)
tprintf(",");
if (bind_len > 1) {
tprintf("%2d/%d", bind_cpu, bind_len);
} else {
tprintf("%2d", bind_cpu);
}
td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
BUG_ON(!td->bind_cpumask);
CPU_ZERO_S(size, td->bind_cpumask);
for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
if (cpu < 0 || cpu >= g->p.nr_cpus) {
CPU_FREE(td->bind_cpumask);
BUG_ON(-1);
}
CPU_SET_S(cpu, size, td->bind_cpumask);
}
t++;
}
}
}
out:
tprintf("\n");
if (t < g->p.nr_tasks)
printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
free(str0);
return 0;
}
static int parse_cpus_opt(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
{
if (!arg)
return -1;
return parse_cpu_list(arg);
}
static int parse_node_list(const char *arg)
{
p0.node_list_str = strdup(arg);
dprintf("got NODE list: {%s}\n", p0.node_list_str);
return 0;
}
static int parse_setup_node_list(void)
{
struct thread_data *td;
char *str0, *str;
int t;
if (!g->p.node_list_str)
return 0;
dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
str0 = str = strdup(g->p.node_list_str);
t = 0;
BUG_ON(!str);
tprintf("# binding tasks to NODEs:\n");
tprintf("# ");
while (true) {
int bind_node, bind_node_0, bind_node_1;
char *tok, *tok_end, *tok_step, *tok_mul;
int step;
int mul;
tok = strsep(&str, ",");
if (!tok)
break;
tok_end = strstr(tok, "-");
dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
if (!tok_end) {
/* Single NODE specified: */
bind_node_0 = bind_node_1 = atol(tok);
} else {
/* NODE range specified (for example: "5-11"): */
bind_node_0 = atol(tok);
bind_node_1 = atol(tok_end + 1);
}
step = 1;
tok_step = strstr(tok, "#");
if (tok_step) {
step = atol(tok_step + 1);
BUG_ON(step <= 0 || step >= g->p.nr_nodes);
}
/* Multiplier shortcut: "0x8" is a shortcut for "0,0,0,0,0,0,0,0" */
mul = 1;
tok_mul = strstr(tok, "x");
if (tok_mul) {
mul = atol(tok_mul + 1);
BUG_ON(mul <= 0);
}
dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);
if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) {
printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes);
return -1;
}
BUG_ON(bind_node_0 < 0 || bind_node_1 < 0);
BUG_ON(bind_node_0 > bind_node_1);
for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
int i;
for (i = 0; i < mul; i++) {
if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
goto out;
}
td = g->threads + t;
if (!t)
tprintf(" %2d", bind_node);
else
tprintf(",%2d", bind_node);
td->bind_node = bind_node;
t++;
}
}
}
out:
tprintf("\n");
if (t < g->p.nr_tasks)
printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
free(str0);
return 0;
}
static int parse_nodes_opt(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
{
if (!arg)
return -1;
return parse_node_list(arg);
}
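/*
 * 32-bit Galois LFSR (right-shift form) with taps at bits 1, 5, 6 and 31.
 * do_work() uses it in --data_rand_walk mode to pick pseudo-random chunk
 * offsets. An illustrative first step, easy to verify by hand:
 *
 *   lfsr_32(1) == 0x80000062   (== the tap mask, since 1>>1 is 0)
 *
 * Each call shifts right by one bit and, if the dropped bit was set,
 * XORs in the tap mask.
 */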
static inline uint32_t lfsr_32(uint32_t lfsr)
{
const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
}
/*
* Make sure there's a real data dependency on RAM (when read
* accesses are enabled), so that the compiler, the CPU and the
* kernel (KSM, zero page, etc.) cannot optimize away the RAM
* accesses:
*/
static inline u64 access_data(u64 *data, u64 val)
{
if (g->p.data_reads)
val += *data;
if (g->p.data_writes)
*data = val + 1;
return val;
}
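/*
 * For instance, with both reads and writes enabled each call compiles
 * to a load that feeds 'val' plus a store of 'val + 1', so consecutive
 * calls form a true dependency chain that cannot be elided:
 *
 *   val = access_data(d,     val);  // val += *d; *d = val + 1;
 *   val = access_data(d + 1, val);  // depends on the previous load
 */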
/*
* The worker process does two types of work, a forwards going
* loop and a backwards going loop.
*
* We do this so that on multiprocessor systems we do not create
* a 'train' of processing, with highly synchronized processes,
* skewing the whole benchmark.
*/
static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val)
{
long words = bytes/sizeof(u64);
u64 *data = (void *)__data;
long chunk_0, chunk_1;
u64 *d0, *d, *d1;
long off;
long i;
BUG_ON(!data && words);
BUG_ON(data && !words);
if (!data)
return val;
/* Very simple memset() work variant: */
if (g->p.data_zero_memset && !g->p.data_rand_walk) {
bzero(data, bytes);
return val;
}
/* Spread out by PID/TID nr and by loop nr: */
chunk_0 = words/nr_max;
chunk_1 = words/g->p.nr_loops;
off = nr*chunk_0 + loop*chunk_1;
while (off >= words)
off -= words;
if (g->p.data_rand_walk) {
u32 lfsr = nr + loop + val;
long j;
for (i = 0; i < words/1024; i++) {
long start, end;
lfsr = lfsr_32(lfsr);
start = lfsr % words;
end = min(start + 1024, words-1);
if (g->p.data_zero_memset) {
bzero(data + start, (end-start) * sizeof(u64));
} else {
for (j = start; j < end; j++)
val = access_data(data + j, val);
}
}
} else if (!g->p.data_backwards || (nr + loop) & 1) {
/* Process data forwards: */
d0 = data + off;
d = data + off + 1;
d1 = data + words;
for (;;) {
if (unlikely(d >= d1))
d = data;
if (unlikely(d == d0))
break;
val = access_data(d, val);
d++;
}
} else {
/* Process data backwards: */
d0 = data + off;
d = data + off - 1;
d1 = data + words;
for (;;) {
if (unlikely(d < data))
d = data + words-1;
if (unlikely(d == d0))
break;
val = access_data(d, val);
d--;
}
}
return val;
}
static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
{
unsigned int cpu;
cpu = sched_getcpu();
g->threads[task_nr].curr_cpu = cpu;
prctl(0, bytes_worked);
}
/*
* Count the number of nodes a process's threads
* are spread out on.
*
* A count of 1 means that the process is compressed
* to a single node. A count of g->p.nr_nodes means it's
* spread out on the whole system.
*/
static int count_process_nodes(int process_nr)
{
char *node_present;
int nodes;
int n, t;
node_present = (char *)malloc(g->p.nr_nodes * sizeof(char));
BUG_ON(!node_present);
for (nodes = 0; nodes < g->p.nr_nodes; nodes++)
node_present[nodes] = 0;
for (t = 0; t < g->p.nr_threads; t++) {
struct thread_data *td;
int task_nr;
int node;
task_nr = process_nr*g->p.nr_threads + t;
td = g->threads + task_nr;
node = numa_node_of_cpu(td->curr_cpu);
if (node < 0) /* curr_cpu was likely still -1 */ {
free(node_present);
return 0;
}
node_present[node] = 1;
}
nodes = 0;
for (n = 0; n < g->p.nr_nodes; n++)
nodes += node_present[n];
free(node_present);
return nodes;
}
/*
* Count the number of distinct processes that have at least
* one thread running on a given node.
*
* A count of 1 means that the node contains only a single
* process. If all nodes on the system contain at most one
* process then we are well-converged.
*/
static int count_node_processes(int node)
{
int processes = 0;
int t, p;
for (p = 0; p < g->p.nr_proc; p++) {
for (t = 0; t < g->p.nr_threads; t++) {
struct thread_data *td;
int task_nr;
int n;
task_nr = p*g->p.nr_threads + t;
td = g->threads + task_nr;
n = numa_node_of_cpu(td->curr_cpu);
if (n == node) {
processes++;
break;
}
}
}
return processes;
}
static void calc_convergence_compression(int *strong)
{
unsigned int nodes_min, nodes_max;
int p;
nodes_min = -1;
nodes_max = 0;
for (p = 0; p < g->p.nr_proc; p++) {
unsigned int nodes = count_process_nodes(p);
if (!nodes) {
*strong = 0;
return;
}
nodes_min = min(nodes, nodes_min);
nodes_max = max(nodes, nodes_max);
}
/* Strong convergence: all threads compress on a single node: */
if (nodes_min == 1 && nodes_max == 1) {
*strong = 1;
} else {
*strong = 0;
tprintf(" {%d-%d}", nodes_min, nodes_max);
}
}
static void calc_convergence(double runtime_ns_max, double *convergence)
{
unsigned int loops_done_min, loops_done_max;
int process_groups;
int *nodes;
int distance;
int nr_min;
int nr_max;
int strong;
int sum;
int nr;
int node;
int cpu;
int t;
if (!g->p.show_convergence && !g->p.measure_convergence)
return;
nodes = (int *)malloc(g->p.nr_nodes * sizeof(int));
BUG_ON(!nodes);
for (node = 0; node < g->p.nr_nodes; node++)
nodes[node] = 0;
loops_done_min = -1;
loops_done_max = 0;
for (t = 0; t < g->p.nr_tasks; t++) {
struct thread_data *td = g->threads + t;
unsigned int loops_done;
cpu = td->curr_cpu;
/* Not all threads have written it yet: */
if (cpu < 0)
continue;
node = numa_node_of_cpu(cpu);
nodes[node]++;
loops_done = td->loops_done;
loops_done_min = min(loops_done, loops_done_min);
loops_done_max = max(loops_done, loops_done_max);
}
nr_max = 0;
nr_min = g->p.nr_tasks;
sum = 0;
for (node = 0; node < g->p.nr_nodes; node++) {
if (!is_node_present(node))
continue;
nr = nodes[node];
nr_min = min(nr, nr_min);
nr_max = max(nr, nr_max);
sum += nr;
}
BUG_ON(nr_min > nr_max);
BUG_ON(sum > g->p.nr_tasks);
if (0 && (sum < g->p.nr_tasks)) {
free(nodes);
return;
}
/*
* Count the number of distinct process groups present
* on nodes - when we are converged this will decrease
* to g->p.nr_proc:
*/
process_groups = 0;
for (node = 0; node < g->p.nr_nodes; node++) {
int processes;
if (!is_node_present(node))
continue;
processes = count_node_processes(node);
nr = nodes[node];
tprintf(" %2d/%-2d", nr, processes);
process_groups += processes;
}
distance = nr_max - nr_min;
tprintf(" [%2d/%-2d]", distance, process_groups);
tprintf(" l:%3d-%-3d (%3d)",
loops_done_min, loops_done_max, loops_done_max-loops_done_min);
if (loops_done_min && loops_done_max) {
double skew = 1.0 - (double)loops_done_min/loops_done_max;
tprintf(" [%4.1f%%]", skew * 100.0);
}
calc_convergence_compression(&strong);
if (strong && process_groups == g->p.nr_proc) {
if (!*convergence) {
*convergence = runtime_ns_max;
tprintf(" (%6.1fs converged)\n", *convergence / NSEC_PER_SEC);
if (g->p.measure_convergence) {
g->all_converged = true;
g->stop_work = true;
}
}
} else {
if (*convergence) {
tprintf(" (%6.1fs de-converged)", runtime_ns_max / NSEC_PER_SEC);
*convergence = 0;
}
tprintf("\n");
}
free(nodes);
}
static void show_summary(double runtime_ns_max, int l, double *convergence)
{
tprintf("\r # %5.1f%% [%.1f mins]",
(double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0);
calc_convergence(runtime_ns_max, convergence);
if (g->p.show_details >= 0)
fflush(stdout);
}
static void *worker_thread(void *__tdata)
{
struct thread_data *td = __tdata;
struct timeval start0, start, stop, diff;
int process_nr = td->process_nr;
int thread_nr = td->thread_nr;
unsigned long last_perturbance;
int task_nr = td->task_nr;
int details = g->p.show_details;
int first_task, last_task;
double convergence = 0;
u64 val = td->val;
double runtime_ns_max;
u8 *global_data;
u8 *process_data;
u8 *thread_data;
u64 bytes_done, secs;
long work_done;
u32 l;
struct rusage rusage;
bind_to_cpumask(td->bind_cpumask);
bind_to_memnode(td->bind_node);
set_taskname("thread %d/%d", process_nr, thread_nr);
global_data = g->data;
process_data = td->process_data;
thread_data = setup_private_data(g->p.bytes_thread);
bytes_done = 0;
last_task = 0;
if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
last_task = 1;
first_task = 0;
if (process_nr == 0 && thread_nr == 0)
first_task = 1;
if (details >= 2) {
printf("# thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n",
process_nr, thread_nr, global_data, process_data, thread_data);
}
if (g->p.serialize_startup) {
mutex_lock(&g->startup_mutex);
g->nr_tasks_started++;
/* The last thread wakes the main process. */
if (g->nr_tasks_started == g->p.nr_tasks)
cond_signal(&g->startup_cond);
mutex_unlock(&g->startup_mutex);
/* Here we will wait for the main process to start us all at once: */
mutex_lock(&g->start_work_mutex);
g->start_work = false;
g->nr_tasks_working++;
while (!g->start_work)
cond_wait(&g->start_work_cond, &g->start_work_mutex);
mutex_unlock(&g->start_work_mutex);
}
gettimeofday(&start0, NULL);
start = stop = start0;
last_perturbance = start.tv_sec;
for (l = 0; l < g->p.nr_loops; l++) {
start = stop;
if (g->stop_work)
break;
val += do_work(global_data, g->p.bytes_global, process_nr, g->p.nr_proc, l, val);
val += do_work(process_data, g->p.bytes_process, thread_nr, g->p.nr_threads, l, val);
val += do_work(thread_data, g->p.bytes_thread, 0, 1, l, val);
if (g->p.sleep_usecs) {
mutex_lock(td->process_lock);
usleep(g->p.sleep_usecs);
mutex_unlock(td->process_lock);
}
/*
* Amount of work to be done under a process-global lock:
*/
if (g->p.bytes_process_locked) {
mutex_lock(td->process_lock);
val += do_work(process_data, g->p.bytes_process_locked, thread_nr, g->p.nr_threads, l, val);
mutex_unlock(td->process_lock);
}
work_done = g->p.bytes_global + g->p.bytes_process +
g->p.bytes_process_locked + g->p.bytes_thread;
update_curr_cpu(task_nr, work_done);
bytes_done += work_done;
if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
continue;
td->loops_done = l;
gettimeofday(&stop, NULL);
/* Check whether our max runtime timed out: */
if (g->p.nr_secs) {
timersub(&stop, &start0, &diff);
if ((u32)diff.tv_sec >= g->p.nr_secs) {
g->stop_work = true;
break;
}
}
/* Update the summary at most once per second: */
if (start.tv_sec == stop.tv_sec)
continue;
/*
* Perturb the first task's equilibrium every g->p.perturb_secs seconds,
* by briefly migrating it into the other half of the system:
*/
if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
cpu_set_t *orig_mask;
int target_cpu;
int this_cpu;
last_perturbance = stop.tv_sec;
/*
* Depending on where we are running, move into
* the other half of the system, to create some
* real disturbance:
*/
this_cpu = g->threads[task_nr].curr_cpu;
if (this_cpu < g->p.nr_cpus/2)
target_cpu = g->p.nr_cpus-1;
else
target_cpu = 0;
orig_mask = bind_to_cpu(target_cpu);
/* Here we are running on the target CPU already */
if (details >= 1)
printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu);
bind_to_cpumask(orig_mask);
CPU_FREE(orig_mask);
}
if (details >= 3) {
timersub(&stop, &start, &diff);
runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;
if (details >= 0) {
printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016"PRIx64"]\n",
process_nr, thread_nr, runtime_ns_max / bytes_done, val);
}
fflush(stdout);
}
if (!last_task)
continue;
timersub(&stop, &start0, &diff);
runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;
show_summary(runtime_ns_max, l, &convergence);
}
gettimeofday(&stop, NULL);
timersub(&stop, &start0, &diff);
td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
secs = td->runtime_ns / NSEC_PER_SEC;
td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
getrusage(RUSAGE_THREAD, &rusage);
td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
td->system_time_ns += rusage.ru_stime.tv_usec * NSEC_PER_USEC;
td->user_time_ns = rusage.ru_utime.tv_sec * NSEC_PER_SEC;
td->user_time_ns += rusage.ru_utime.tv_usec * NSEC_PER_USEC;
free_data(thread_data, g->p.bytes_thread);
mutex_lock(&g->stop_work_mutex);
g->bytes_done += bytes_done;
mutex_unlock(&g->stop_work_mutex);
return NULL;
}
/*
* A worker process starts a couple of threads:
*/
static void worker_process(int process_nr)
{
struct mutex process_lock;
struct thread_data *td;
pthread_t *pthreads;
u8 *process_data;
int task_nr;
int ret;
int t;
mutex_init(&process_lock);
set_taskname("process %d", process_nr);
/*
* Pick up the memory policy and the CPU binding of our first thread,
* so that we initialize memory accordingly:
*/
task_nr = process_nr*g->p.nr_threads;
td = g->threads + task_nr;
bind_to_memnode(td->bind_node);
bind_to_cpumask(td->bind_cpumask);
pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
process_data = setup_private_data(g->p.bytes_process);
if (g->p.show_details >= 3) {
printf(" # process %2d global mem: %p, process mem: %p\n",
process_nr, g->data, process_data);
}
for (t = 0; t < g->p.nr_threads; t++) {
task_nr = process_nr*g->p.nr_threads + t;
td = g->threads + task_nr;
td->process_data = process_data;
td->process_nr = process_nr;
td->thread_nr = t;
td->task_nr = task_nr;
td->val = rand();
td->curr_cpu = -1;
td->process_lock = &process_lock;
ret = pthread_create(pthreads + t, NULL, worker_thread, td);
BUG_ON(ret);
}
for (t = 0; t < g->p.nr_threads; t++) {
ret = pthread_join(pthreads[t], NULL);
BUG_ON(ret);
}
free_data(process_data, g->p.bytes_process);
free(pthreads);
}
static void print_summary(void)
{
if (g->p.show_details < 0)
return;
printf("\n ###\n");
printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
printf(" # %5dx %5ldMB global shared mem operations\n",
g->p.nr_loops, g->p.bytes_global/1024/1024);
printf(" # %5dx %5ldMB process shared mem operations\n",
g->p.nr_loops, g->p.bytes_process/1024/1024);
printf(" # %5dx %5ldMB thread local mem operations\n",
g->p.nr_loops, g->p.bytes_thread/1024/1024);
printf(" ###\n");
printf("\n ###\n"); fflush(stdout);
}
static void init_thread_data(void)
{
ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
int t;
g->threads = zalloc_shared_data(size);
for (t = 0; t < g->p.nr_tasks; t++) {
struct thread_data *td = g->threads + t;
size_t cpuset_size = CPU_ALLOC_SIZE(g->p.nr_cpus);
int cpu;
/* Allow all nodes by default: */
td->bind_node = NUMA_NO_NODE;
/* Allow all CPUs by default: */
td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
BUG_ON(!td->bind_cpumask);
CPU_ZERO_S(cpuset_size, td->bind_cpumask);
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
CPU_SET_S(cpu, cpuset_size, td->bind_cpumask);
}
}
static void deinit_thread_data(void)
{
ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
int t;
/* Free the bind_cpumask allocated for thread_data */
for (t = 0; t < g->p.nr_tasks; t++) {
struct thread_data *td = g->threads + t;
CPU_FREE(td->bind_cpumask);
}
free_data(g->threads, size);
}
static int init(void)
{
g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0);
/* Copy over options: */
g->p = p0;
g->p.nr_cpus = numa_num_configured_cpus();
g->p.nr_nodes = numa_max_node() + 1;
/* nr_nodes sizes the node_present[] char array in count_process_nodes(): */
BUG_ON(g->p.nr_nodes < 0);
if (quiet && !g->p.show_details)
g->p.show_details = -1;
/* Some memory should be specified: */
if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str)
return -1;
if (g->p.mb_global_str) {
g->p.mb_global = atof(g->p.mb_global_str);
BUG_ON(g->p.mb_global < 0);
}
if (g->p.mb_proc_str) {
g->p.mb_proc = atof(g->p.mb_proc_str);
BUG_ON(g->p.mb_proc < 0);
}
if (g->p.mb_proc_locked_str) {
g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str);
BUG_ON(g->p.mb_proc_locked < 0);
BUG_ON(g->p.mb_proc_locked > g->p.mb_proc);
}
if (g->p.mb_thread_str) {
g->p.mb_thread = atof(g->p.mb_thread_str);
BUG_ON(g->p.mb_thread < 0);
}
BUG_ON(g->p.nr_threads <= 0);
BUG_ON(g->p.nr_proc <= 0);
g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;
g->p.bytes_global = g->p.mb_global *1024L*1024L;
g->p.bytes_process = g->p.mb_proc *1024L*1024L;
g->p.bytes_process_locked = g->p.mb_proc_locked *1024L*1024L;
g->p.bytes_thread = g->p.mb_thread *1024L*1024L;
g->data = setup_shared_data(g->p.bytes_global);
/* Startup serialization: */
mutex_init_pshared(&g->start_work_mutex);
cond_init_pshared(&g->start_work_cond);
mutex_init_pshared(&g->startup_mutex);
cond_init_pshared(&g->startup_cond);
mutex_init_pshared(&g->stop_work_mutex);
init_thread_data();
tprintf("#\n");
if (parse_setup_cpu_list() || parse_setup_node_list())
return -1;
tprintf("#\n");
print_summary();
return 0;
}
static void deinit(void)
{
free_data(g->data, g->p.bytes_global);
g->data = NULL;
deinit_thread_data();
free_data(g, sizeof(*g));
g = NULL;
}
/*
* Print a short or long result, depending on the verbosity setting:
*/
static void print_res(const char *name, double val,
const char *txt_unit, const char *txt_short, const char *txt_long)
{
if (!name)
name = "main,";
if (!quiet)
printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
else
printf(" %14.3f %s\n", val, txt_long);
}
static int __bench_numa(const char *name)
{
struct timeval start, stop, diff;
u64 runtime_ns_min, runtime_ns_sum;
pid_t *pids, pid, wpid;
double delta_runtime;
double runtime_avg;
double runtime_sec_max;
double runtime_sec_min;
int wait_stat;
double bytes;
int i, t, p;
if (init())
return -1;
pids = zalloc(g->p.nr_proc * sizeof(*pids));
pid = -1;
if (g->p.serialize_startup) {
tprintf(" #\n");
tprintf(" # Startup synchronization: ..."); fflush(stdout);
}
gettimeofday(&start, NULL);
for (i = 0; i < g->p.nr_proc; i++) {
pid = fork();
dprintf(" # process %2d: PID %d\n", i, pid);
BUG_ON(pid < 0);
if (!pid) {
/* Child process: */
worker_process(i);
exit(0);
}
pids[i] = pid;
}
if (g->p.serialize_startup) {
bool threads_ready = false;
double startup_sec;
/*
* Wait for all the threads to start up. The last thread will
* signal this process.
*/
mutex_lock(&g->startup_mutex);
while (g->nr_tasks_started != g->p.nr_tasks)
cond_wait(&g->startup_cond, &g->startup_mutex);
mutex_unlock(&g->startup_mutex);
/* Wait for all threads to be at the start_work_cond. */
while (!threads_ready) {
mutex_lock(&g->start_work_mutex);
threads_ready = (g->nr_tasks_working == g->p.nr_tasks);
mutex_unlock(&g->start_work_mutex);
if (!threads_ready)
usleep(1);
}
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
startup_sec = diff.tv_sec * NSEC_PER_SEC;
startup_sec += diff.tv_usec * NSEC_PER_USEC;
startup_sec /= NSEC_PER_SEC;
tprintf(" threads initialized in %.6f seconds.\n", startup_sec);
tprintf(" #\n");
start = stop;
/* Start all threads running. */
mutex_lock(&g->start_work_mutex);
g->start_work = true;
mutex_unlock(&g->start_work_mutex);
cond_broadcast(&g->start_work_cond);
} else {
gettimeofday(&start, NULL);
}
/* Parent process: */
for (i = 0; i < g->p.nr_proc; i++) {
wpid = waitpid(pids[i], &wait_stat, 0);
BUG_ON(wpid < 0);
BUG_ON(!WIFEXITED(wait_stat));
}
runtime_ns_sum = 0;
runtime_ns_min = -1LL;
for (t = 0; t < g->p.nr_tasks; t++) {
u64 thread_runtime_ns = g->threads[t].runtime_ns;
runtime_ns_sum += thread_runtime_ns;
runtime_ns_min = min(thread_runtime_ns, runtime_ns_min);
}
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
BUG_ON(bench_format != BENCH_FORMAT_DEFAULT);
tprintf("\n ###\n");
tprintf("\n");
runtime_sec_max = diff.tv_sec * NSEC_PER_SEC;
runtime_sec_max += diff.tv_usec * NSEC_PER_USEC;
runtime_sec_max /= NSEC_PER_SEC;
runtime_sec_min = runtime_ns_min / NSEC_PER_SEC;
bytes = g->bytes_done;
runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC;
if (g->p.measure_convergence) {
print_res(name, runtime_sec_max,
"secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge");
}
print_res(name, runtime_sec_max,
"secs,", "runtime-max/thread", "secs slowest (max) thread-runtime");
print_res(name, runtime_sec_min,
"secs,", "runtime-min/thread", "secs fastest (min) thread-runtime");
print_res(name, runtime_avg,
"secs,", "runtime-avg/thread", "secs average thread-runtime");
delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0;
print_res(name, delta_runtime / runtime_sec_max * 100.0,
"%,", "spread-runtime/thread", "% difference between max/avg runtime");
print_res(name, bytes / g->p.nr_tasks / 1e9,
"GB,", "data/thread", "GB data processed, per thread");
print_res(name, bytes / 1e9,
"GB,", "data-total", "GB data processed, total");
print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks),
"nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime");
print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
"GB/sec,", "thread-speed", "GB/sec/thread speed");
print_res(name, bytes / runtime_sec_max / 1e9,
"GB/sec,", "total-speed", "GB/sec total speed");
if (g->p.show_details >= 2) {
char tname[14 + 2 * 11 + 1];
struct thread_data *td;
for (p = 0; p < g->p.nr_proc; p++) {
for (t = 0; t < g->p.nr_threads; t++) {
memset(tname, 0, sizeof(tname));
td = g->threads + p*g->p.nr_threads + t;
snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
print_res(tname, td->speed_gbs,
"GB/sec", "thread-speed", "GB/sec/thread speed");
print_res(tname, td->system_time_ns / NSEC_PER_SEC,
"secs", "thread-system-time", "system CPU time/thread");
print_res(tname, td->user_time_ns / NSEC_PER_SEC,
"secs", "thread-user-time", "user CPU time/thread");
}
}
}
free(pids);
deinit();
return 0;
}
#define MAX_ARGS 50
static int command_size(const char **argv)
{
int size = 0;
while (*argv) {
size++;
argv++;
}
BUG_ON(size >= MAX_ARGS);
return size;
}
static void init_params(struct params *p, const char *name, int argc, const char **argv)
{
int i;
printf("\n # Running %s \"perf bench numa", name);
for (i = 0; i < argc; i++)
printf(" %s", argv[i]);
printf("\"\n");
memset(p, 0, sizeof(*p));
/* Initialize nonzero defaults: */
p->serialize_startup = 1;
p->data_reads = true;
p->data_writes = true;
p->data_backwards = true;
p->data_rand_walk = true;
p->nr_loops = -1;
p->init_random = true;
p->mb_global_str = "1";
p->nr_proc = 1;
p->nr_threads = 1;
p->nr_secs = 5;
p->run_all = argc == 1;
}
static int run_bench_numa(const char *name, const char **argv)
{
int argc = command_size(argv);
init_params(&p0, name, argc, argv);
argc = parse_options(argc, argv, options, bench_numa_usage, 0);
if (argc)
goto err;
if (__bench_numa(name))
goto err;
return 0;
err:
return -1;
}
#define OPT_BW_RAM "-s", "20", "-zZq", "--thp", " 1", "--no-data_rand_walk"
#define OPT_BW_RAM_NOTHP OPT_BW_RAM, "--thp", "-1"
#define OPT_CONV "-s", "100", "-zZ0qcm", "--thp", " 1"
#define OPT_CONV_NOTHP OPT_CONV, "--thp", "-1"
#define OPT_BW "-s", "20", "-zZ0q", "--thp", " 1"
#define OPT_BW_NOTHP OPT_BW, "--thp", "-1"
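/*
 * Each tests[] row below is the name plus argv of one "perf bench numa mem"
 * run; the OPT_* macros above splice in the common flags. For example, the
 * first "RAM-bw-local" entry expands to:
 *
 *   perf bench numa mem -p 1 -t 1 -P 1024 -C 0 -M 0 \
 *           -s 20 -zZq --thp 1 --no-data_rand_walk
 */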
/*
* The built-in test-suite executed by "perf bench numa -a".
*
* (A minimum of 4 nodes and 16 GB of RAM is recommended.)
*/
static const char *tests[][MAX_ARGS] = {
/* Basic single-stream NUMA bandwidth measurements: */
{ "RAM-bw-local,", "mem", "-p", "1", "-t", "1", "-P", "1024",
"-C" , "0", "-M", "0", OPT_BW_RAM },
{ "RAM-bw-local-NOTHP,",
"mem", "-p", "1", "-t", "1", "-P", "1024",
"-C" , "0", "-M", "0", OPT_BW_RAM_NOTHP },
{ "RAM-bw-remote,", "mem", "-p", "1", "-t", "1", "-P", "1024",
"-C" , "0", "-M", "1", OPT_BW_RAM },
/* 2-stream NUMA bandwidth measurements: */
{ "RAM-bw-local-2x,", "mem", "-p", "2", "-t", "1", "-P", "1024",
"-C", "0,2", "-M", "0x2", OPT_BW_RAM },
{ "RAM-bw-remote-2x,", "mem", "-p", "2", "-t", "1", "-P", "1024",
"-C", "0,2", "-M", "1x2", OPT_BW_RAM },
/* Cross-stream NUMA bandwidth measurement: */
{ "RAM-bw-cross,", "mem", "-p", "2", "-t", "1", "-P", "1024",
"-C", "0,8", "-M", "1,0", OPT_BW_RAM },
/* Convergence latency measurements: */
{ " 1x3-convergence,", "mem", "-p", "1", "-t", "3", "-P", "512", OPT_CONV },
{ " 1x4-convergence,", "mem", "-p", "1", "-t", "4", "-P", "512", OPT_CONV },
{ " 1x6-convergence,", "mem", "-p", "1", "-t", "6", "-P", "1020", OPT_CONV },
{ " 2x3-convergence,", "mem", "-p", "2", "-t", "3", "-P", "1020", OPT_CONV },
{ " 3x3-convergence,", "mem", "-p", "3", "-t", "3", "-P", "1020", OPT_CONV },
{ " 4x4-convergence,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV },
{ " 4x4-convergence-NOTHP,",
"mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV_NOTHP },
{ " 4x6-convergence,", "mem", "-p", "4", "-t", "6", "-P", "1020", OPT_CONV },
{ " 4x8-convergence,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_CONV },
{ " 8x4-convergence,", "mem", "-p", "8", "-t", "4", "-P", "512", OPT_CONV },
{ " 8x4-convergence-NOTHP,",
"mem", "-p", "8", "-t", "4", "-P", "512", OPT_CONV_NOTHP },
{ " 3x1-convergence,", "mem", "-p", "3", "-t", "1", "-P", "512", OPT_CONV },
{ " 4x1-convergence,", "mem", "-p", "4", "-t", "1", "-P", "512", OPT_CONV },
{ " 8x1-convergence,", "mem", "-p", "8", "-t", "1", "-P", "512", OPT_CONV },
{ "16x1-convergence,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_CONV },
{ "32x1-convergence,", "mem", "-p", "32", "-t", "1", "-P", "128", OPT_CONV },
/* Various NUMA process/thread layout bandwidth measurements: */
{ " 2x1-bw-process,", "mem", "-p", "2", "-t", "1", "-P", "1024", OPT_BW },
{ " 3x1-bw-process,", "mem", "-p", "3", "-t", "1", "-P", "1024", OPT_BW },
{ " 4x1-bw-process,", "mem", "-p", "4", "-t", "1", "-P", "1024", OPT_BW },
{ " 8x1-bw-process,", "mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW },
{ " 8x1-bw-process-NOTHP,",
"mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW_NOTHP },
{ "16x1-bw-process,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_BW },
{ " 1x4-bw-thread,", "mem", "-p", "1", "-t", "4", "-T", "256", OPT_BW },
{ " 1x8-bw-thread,", "mem", "-p", "1", "-t", "8", "-T", "256", OPT_BW },
{ "1x16-bw-thread,", "mem", "-p", "1", "-t", "16", "-T", "128", OPT_BW },
{ "1x32-bw-thread,", "mem", "-p", "1", "-t", "32", "-T", "64", OPT_BW },
{ " 2x3-bw-process,", "mem", "-p", "2", "-t", "3", "-P", "512", OPT_BW },
{ " 4x4-bw-process,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_BW },
{ " 4x6-bw-process,", "mem", "-p", "4", "-t", "6", "-P", "512", OPT_BW },
{ " 4x8-bw-process,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW },
{ " 4x8-bw-process-NOTHP,",
"mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW_NOTHP },
{ " 3x3-bw-process,", "mem", "-p", "3", "-t", "3", "-P", "512", OPT_BW },
{ " 5x5-bw-process,", "mem", "-p", "5", "-t", "5", "-P", "512", OPT_BW },
{ "2x16-bw-process,", "mem", "-p", "2", "-t", "16", "-P", "512", OPT_BW },
{ "1x32-bw-process,", "mem", "-p", "1", "-t", "32", "-P", "2048", OPT_BW },
{ "numa02-bw,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW },
{ "numa02-bw-NOTHP,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW_NOTHP },
{ "numa01-bw-thread,", "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW },
{ "numa01-bw-thread-NOTHP,",
"mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW_NOTHP },
};
static int bench_all(void)
{
int nr = ARRAY_SIZE(tests);
int ret;
int i;
ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'");
BUG_ON(ret < 0);
for (i = 0; i < nr; i++) {
run_bench_numa(tests[i][0], tests[i] + 1);
}
printf("\n");
return 0;
}
int bench_numa(int argc, const char **argv)
{
init_params(&p0, "main,", argc, argv);
argc = parse_options(argc, argv, options, bench_numa_usage, 0);
if (argc)
goto err;
if (p0.run_all)
return bench_all();
if (__bench_numa(NULL))
goto err;
return 0;
err:
usage_with_options(numa_usage, options);
return -1;
}
| linux-master | tools/perf/bench/numa.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Benchmark synthesis of perf events such as at the start of a 'perf
* record'. Synthesis is done on the current process, and 'dummy' event
* handlers are invoked; they support dump_trace but otherwise do nothing.
*
* Copyright 2019 Google LLC.
*/
#include <stdio.h>
#include "bench.h"
#include "../util/debug.h"
#include "../util/session.h"
#include "../util/stat.h"
#include "../util/synthetic-events.h"
#include "../util/target.h"
#include "../util/thread_map.h"
#include "../util/tool.h"
#include "../util/util.h"
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/time64.h>
#include <subcmd/parse-options.h>
static unsigned int min_threads = 1;
static unsigned int max_threads = UINT_MAX;
static unsigned int single_iterations = 10000;
static unsigned int multi_iterations = 10;
static bool run_st;
static bool run_mt;
static const struct option options[] = {
OPT_BOOLEAN('s', "st", &run_st, "Run single threaded benchmark"),
OPT_BOOLEAN('t', "mt", &run_mt, "Run multi-threaded benchmark"),
OPT_UINTEGER('m', "min-threads", &min_threads,
"Minimum number of threads in multithreaded bench"),
OPT_UINTEGER('M', "max-threads", &max_threads,
"Maximum number of threads in multithreaded bench"),
OPT_UINTEGER('i', "single-iterations", &single_iterations,
"Number of iterations used to compute single-threaded average"),
OPT_UINTEGER('I', "multi-iterations", &multi_iterations,
"Number of iterations used to compute multi-threaded average"),
OPT_END()
};
static const char *const bench_usage[] = {
"perf bench internals synthesize <options>",
NULL
};
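/*
 * Example invocations, using the flags defined in options[] above:
 *
 *   perf bench internals synthesize -s -i 1000   // single threaded only
 *   perf bench internals synthesize -t -m 2 -M 8 // sweep 2..8 threads
 */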
static atomic_t event_count;
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
atomic_inc(&event_count);
return 0;
}
static int do_run_single_threaded(struct perf_session *session,
struct perf_thread_map *threads,
struct target *target, bool data_mmap)
{
const unsigned int nr_threads_synthesize = 1;
struct timeval start, end, diff;
u64 runtime_us;
unsigned int i;
double time_average, time_stddev, event_average, event_stddev;
int err;
struct stats time_stats, event_stats;
init_stats(&time_stats);
init_stats(&event_stats);
for (i = 0; i < single_iterations; i++) {
atomic_set(&event_count, 0);
gettimeofday(&start, NULL);
err = __machine__synthesize_threads(&session->machines.host,
NULL,
target, threads,
process_synthesized_event,
true, data_mmap,
nr_threads_synthesize);
if (err)
return err;
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
update_stats(&time_stats, runtime_us);
update_stats(&event_stats, atomic_read(&event_count));
}
time_average = avg_stats(&time_stats);
time_stddev = stddev_stats(&time_stats);
printf(" Average %ssynthesis took: %.3f usec (+- %.3f usec)\n",
data_mmap ? "data " : "", time_average, time_stddev);
event_average = avg_stats(&event_stats);
event_stddev = stddev_stats(&event_stats);
printf(" Average num. events: %.3f (+- %.3f)\n",
event_average, event_stddev);
printf(" Average time per event %.3f usec\n",
time_average / event_average);
return 0;
}
static int run_single_threaded(void)
{
struct perf_session *session;
struct target target = {
.pid = "self",
};
struct perf_thread_map *threads;
int err;
perf_set_singlethreaded();
session = perf_session__new(NULL, NULL);
if (IS_ERR(session)) {
pr_err("Session creation failed.\n");
return PTR_ERR(session);
}
threads = thread_map__new_by_pid(getpid());
if (!threads) {
pr_err("Thread map creation failed.\n");
err = -ENOMEM;
goto err_out;
}
puts(
"Computing performance of single threaded perf event synthesis by\n"
"synthesizing events on the perf process itself:");
err = do_run_single_threaded(session, threads, &target, false);
if (err)
goto err_out;
err = do_run_single_threaded(session, threads, &target, true);
err_out:
if (threads)
perf_thread_map__put(threads);
perf_session__delete(session);
return err;
}
static int do_run_multi_threaded(struct target *target,
unsigned int nr_threads_synthesize)
{
struct timeval start, end, diff;
u64 runtime_us;
unsigned int i;
double time_average, time_stddev, event_average, event_stddev;
int err;
struct stats time_stats, event_stats;
struct perf_session *session;
init_stats(&time_stats);
init_stats(&event_stats);
for (i = 0; i < multi_iterations; i++) {
session = perf_session__new(NULL, NULL);
if (IS_ERR(session))
return PTR_ERR(session);
atomic_set(&event_count, 0);
gettimeofday(&start, NULL);
err = __machine__synthesize_threads(&session->machines.host,
NULL,
target, NULL,
process_synthesized_event,
true, false,
nr_threads_synthesize);
if (err) {
perf_session__delete(session);
return err;
}
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
update_stats(&time_stats, runtime_us);
update_stats(&event_stats, atomic_read(&event_count));
perf_session__delete(session);
}
time_average = avg_stats(&time_stats);
time_stddev = stddev_stats(&time_stats);
printf(" Average synthesis took: %.3f usec (+- %.3f usec)\n",
time_average, time_stddev);
event_average = avg_stats(&event_stats);
event_stddev = stddev_stats(&event_stats);
printf(" Average num. events: %.3f (+- %.3f)\n",
event_average, event_stddev);
printf(" Average time per event %.3f usec\n",
time_average / event_average);
return 0;
}
static int run_multi_threaded(void)
{
struct target target = {
.cpu_list = "0"
};
unsigned int nr_threads_synthesize;
int err;
if (max_threads == UINT_MAX)
max_threads = sysconf(_SC_NPROCESSORS_ONLN);
puts(
"Computing performance of multi threaded perf event synthesis by\n"
"synthesizing events on CPU 0:");
for (nr_threads_synthesize = min_threads;
nr_threads_synthesize <= max_threads;
nr_threads_synthesize++) {
if (nr_threads_synthesize == 1)
perf_set_singlethreaded();
else
perf_set_multithreaded();
printf(" Number of synthesis threads: %u\n",
nr_threads_synthesize);
err = do_run_multi_threaded(&target, nr_threads_synthesize);
if (err)
return err;
}
perf_set_singlethreaded();
return 0;
}
int bench_synthesize(int argc, const char **argv)
{
int err = 0;
argc = parse_options(argc, argv, options, bench_usage, 0);
if (argc) {
usage_with_options(bench_usage, options);
exit(EXIT_FAILURE);
}
/*
* If neither single-threaded nor multi-threaded mode is specified,
* default to running just the single-threaded benchmark.
*/
if (!run_st && !run_mt)
run_st = true;
if (run_st)
err = run_single_threaded();
if (!err && run_mt)
err = run_multi_threaded();
return err;
}
| linux-master | tools/perf/bench/synthesize.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Davidlohr Bueso <[email protected]>
*
* futex-requeue: Block a bunch of threads on futex1 and requeue them
* on futex2, N at a time.
*
* This program is particularly useful to measure the latency of nthread
* requeues without waking up any tasks (in the non-pi case) -- thus
* mimicking a regular futex_wait.
*/
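/*
 * Example, using the flags defined in options[] below: "perf bench futex
 * requeue -t 64 -q 8" blocks 64 threads on futex1 and requeues them to
 * futex2 eight at a time; -B requeues all of them at once, and -p switches
 * to the PI-aware FUTEX_CMP_REQUEUE_PI variant.
 */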
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <signal.h>
#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/mman.h>
static u_int32_t futex1 = 0, futex2 = 0;
static pthread_t *worker;
static bool done = false;
static struct mutex thread_lock;
static struct cond thread_parent, thread_worker;
static struct stats requeuetime_stats, requeued_stats;
static unsigned int threads_starting;
static int futex_flag = 0;
static struct bench_futex_parameters params = {
/*
* How many tasks to requeue at a time.
* Default to 1 in order to make the kernel work more.
*/
.nrequeue = 1,
};
static const struct option options[] = {
OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"),
OPT_UINTEGER('q', "nrequeue", ¶ms.nrequeue, "Specify amount of threads to requeue at once"),
OPT_BOOLEAN( 's', "silent", ¶ms.silent, "Silent mode: do not display data/details"),
OPT_BOOLEAN( 'S', "shared", ¶ms.fshared, "Use shared futexes instead of private ones"),
OPT_BOOLEAN( 'm', "mlockall", ¶ms.mlockall, "Lock all current and future memory"),
OPT_BOOLEAN( 'B', "broadcast", ¶ms.broadcast, "Requeue all threads at once"),
OPT_BOOLEAN( 'p', "pi", ¶ms.pi, "Use PI-aware variants of FUTEX_CMP_REQUEUE"),
OPT_END()
};
static const char * const bench_futex_requeue_usage[] = {
"perf bench futex requeue <options>",
NULL
};
static void print_summary(void)
{
double requeuetime_avg = avg_stats(&requeuetime_stats);
double requeuetime_stddev = stddev_stats(&requeuetime_stats);
unsigned int requeued_avg = avg_stats(&requeued_stats);
printf("Requeued %d of %d threads in %.4f ms (+-%.2f%%)\n",
requeued_avg,
params.nthreads,
requeuetime_avg / USEC_PER_MSEC,
rel_stddev_stats(requeuetime_stddev, requeuetime_avg));
}
static void *workerfn(void *arg __maybe_unused)
{
int ret;
mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
cond_signal(&thread_parent);
cond_wait(&thread_worker, &thread_lock);
mutex_unlock(&thread_lock);
while (1) {
if (!params.pi) {
ret = futex_wait(&futex1, 0, NULL, futex_flag);
if (!ret)
break;
if (ret && errno != EAGAIN) {
if (!params.silent)
warnx("futex_wait");
break;
}
} else {
ret = futex_wait_requeue_pi(&futex1, 0, &futex2,
NULL, futex_flag);
if (!ret) {
/* got the lock at futex2 */
futex_unlock_pi(&futex2, futex_flag);
break;
}
if (ret && errno != EAGAIN) {
if (!params.silent)
warnx("futex_wait_requeue_pi");
break;
}
}
}
return NULL;
}
static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
{
cpu_set_t *cpuset;
unsigned int i;
int nrcpus = perf_cpu_map__nr(cpu);
size_t size;
threads_starting = params.nthreads;
cpuset = CPU_ALLOC(nrcpus);
BUG_ON(!cpuset);
size = CPU_ALLOC_SIZE(nrcpus);
/* create and block all threads */
for (i = 0; i < params.nthreads; i++) {
pthread_attr_t thread_attr;
pthread_attr_init(&thread_attr);
CPU_ZERO_S(size, cpuset);
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
}
if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
}
pthread_attr_destroy(&thread_attr);
}
CPU_FREE(cpuset);
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
done = true;
}
int bench_futex_requeue(int argc, const char **argv)
{
int ret = 0;
unsigned int i, j;
struct sigaction act;
struct perf_cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
if (argc)
goto err;
cpu = perf_cpu_map__new(NULL);
if (!cpu)
err(EXIT_FAILURE, "cpu_map__new");
memset(&act, 0, sizeof(act));
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (params.mlockall) {
if (mlockall(MCL_CURRENT | MCL_FUTURE))
err(EXIT_FAILURE, "mlockall");
}
if (!params.nthreads)
params.nthreads = perf_cpu_map__nr(cpu);
worker = calloc(params.nthreads, sizeof(*worker));
if (!worker)
err(EXIT_FAILURE, "calloc");
if (!params.fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
if (params.nrequeue > params.nthreads)
params.nrequeue = params.nthreads;
if (params.broadcast)
params.nrequeue = params.nthreads;
printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %s%p), "
"%d at a time.\n\n", getpid(), params.nthreads,
params.fshared ? "shared":"private", &futex1,
params.pi ? "PI ": "", &futex2, params.nrequeue);
init_stats(&requeued_stats);
init_stats(&requeuetime_stats);
mutex_init(&thread_lock);
cond_init(&thread_parent);
cond_init(&thread_worker);
for (j = 0; j < bench_repeat && !done; j++) {
unsigned int nrequeued = 0, wakeups = 0;
struct timeval start, end, runtime;
/* create, launch & block all threads */
block_threads(worker, cpu);
/* make sure all threads are already blocked */
mutex_lock(&thread_lock);
while (threads_starting)
cond_wait(&thread_parent, &thread_lock);
cond_broadcast(&thread_worker);
mutex_unlock(&thread_lock);
usleep(100000);
/* Ok, all threads are patiently blocked, start requeueing */
gettimeofday(&start, NULL);
while (nrequeued < params.nthreads) {
int r;
/*
* For the regular non-pi case, do not wake up any tasks
* blocked on futex1, allowing us to really measure
* futex_wait functionality. For the PI case the first
* waiter is always awoken.
*/
if (!params.pi) {
r = futex_cmp_requeue(&futex1, 0, &futex2, 0,
params.nrequeue,
futex_flag);
} else {
r = futex_cmp_requeue_pi(&futex1, 0, &futex2,
params.nrequeue,
futex_flag);
wakeups++; /* assume no error */
}
if (r < 0)
err(EXIT_FAILURE, "couldn't requeue from %p to %p",
&futex1, &futex2);
nrequeued += r;
}
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
update_stats(&requeued_stats, nrequeued);
update_stats(&requeuetime_stats, runtime.tv_usec);
if (!params.silent) {
if (!params.pi)
printf("[Run %d]: Requeued %d of %d threads in "
"%.4f ms\n", j + 1, nrequeued,
params.nthreads,
runtime.tv_usec / (double)USEC_PER_MSEC);
else {
nrequeued -= wakeups;
printf("[Run %d]: Awoke and Requeued (%d+%d) of "
"%d threads in %.4f ms\n",
j + 1, wakeups, nrequeued,
params.nthreads,
runtime.tv_usec / (double)USEC_PER_MSEC);
}
}
if (!params.pi) {
/* everybody should be blocked on futex2, wake'em up */
nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
if (params.nthreads != nrequeued)
warnx("couldn't wakeup all tasks (%d/%d)",
nrequeued, params.nthreads);
}
for (i = 0; i < params.nthreads; i++) {
ret = pthread_join(worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
}
/* cleanup & report results */
cond_destroy(&thread_parent);
cond_destroy(&thread_worker);
mutex_destroy(&thread_lock);
print_summary();
free(worker);
perf_cpu_map__put(cpu);
return ret;
err:
usage_with_options(bench_futex_requeue_usage, options);
exit(EXIT_FAILURE);
}
| linux-master | tools/perf/bench/futex-requeue.c |
// SPDX-License-Identifier: GPL-2.0
/*
*
* sched-pipe.c
*
* pipe: Benchmark for pipe()
*
* Based on pipe-test-1m.c by Ingo Molnar <[email protected]>
* http://people.redhat.com/mingo/cfs-scheduler/tools/pipe-test-1m.c
* Ported to perf by Hitoshi Mitake <[email protected]>
*/
#include <subcmd/parse-options.h>
#include "bench.h"
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/wait.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/time64.h>
#include <pthread.h>
struct thread_data {
int nr;
int pipe_read;
int pipe_write;
pthread_t pthread;
};
#define LOOPS_DEFAULT 1000000
static int loops = LOOPS_DEFAULT;
/* Use processes by default: */
static bool threaded;
static const struct option options[] = {
OPT_INTEGER('l', "loop", &loops, "Specify number of loops"),
OPT_BOOLEAN('T', "threaded", &threaded, "Specify threads/process based task setup"),
OPT_END()
};
static const char * const bench_sched_pipe_usage[] = {
"perf bench sched pipe <options>",
NULL
};
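/*
 * Example: "perf bench sched pipe -l 500000 -T" bounces an integer through
 * the two pipes 500000 times, using two threads instead of the default two
 * processes.
 */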
static void *worker_thread(void *__tdata)
{
struct thread_data *td = __tdata;
int m = 0, i;
int ret;
for (i = 0; i < loops; i++) {
if (!td->nr) {
ret = read(td->pipe_read, &m, sizeof(int));
BUG_ON(ret != sizeof(int));
ret = write(td->pipe_write, &m, sizeof(int));
BUG_ON(ret != sizeof(int));
} else {
ret = write(td->pipe_write, &m, sizeof(int));
BUG_ON(ret != sizeof(int));
ret = read(td->pipe_read, &m, sizeof(int));
BUG_ON(ret != sizeof(int));
}
}
return NULL;
}
int bench_sched_pipe(int argc, const char **argv)
{
struct thread_data threads[2], *td;
int pipe_1[2], pipe_2[2];
struct timeval start, stop, diff;
unsigned long long result_usec = 0;
int nr_threads = 2;
int t;
/*
* Why does "ret" exist?
* Discarding the return value of read()/write() causes a
* build error in perf's build environment.
*/
int __maybe_unused ret, wait_stat;
pid_t pid, retpid __maybe_unused;
argc = parse_options(argc, argv, options, bench_sched_pipe_usage, 0);
BUG_ON(pipe(pipe_1));
BUG_ON(pipe(pipe_2));
gettimeofday(&start, NULL);
for (t = 0; t < nr_threads; t++) {
td = threads + t;
td->nr = t;
if (t == 0) {
td->pipe_read = pipe_1[0];
td->pipe_write = pipe_2[1];
} else {
td->pipe_write = pipe_1[1];
td->pipe_read = pipe_2[0];
}
}
if (threaded) {
for (t = 0; t < nr_threads; t++) {
td = threads + t;
ret = pthread_create(&td->pthread, NULL, worker_thread, td);
BUG_ON(ret);
}
for (t = 0; t < nr_threads; t++) {
td = threads + t;
ret = pthread_join(td->pthread, NULL);
BUG_ON(ret);
}
} else {
pid = fork();
assert(pid >= 0);
if (!pid) {
worker_thread(threads + 0);
exit(0);
} else {
worker_thread(threads + 1);
}
retpid = waitpid(pid, &wait_stat, 0);
assert((retpid == pid) && WIFEXITED(wait_stat));
}
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
printf("# Executed %d pipe operations between two %s\n\n",
loops, threaded ? "threads" : "processes");
result_usec = diff.tv_sec * USEC_PER_SEC;
result_usec += diff.tv_usec;
printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
(unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / USEC_PER_MSEC));
printf(" %14lf usecs/op\n",
(double)result_usec / (double)loops);
printf(" %14d ops/sec\n",
(int)((double)loops /
((double)result_usec / (double)USEC_PER_SEC)));
break;
case BENCH_FORMAT_SIMPLE:
printf("%lu.%03lu\n",
(unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / USEC_PER_MSEC));
break;
default:
/* reaching here would be a disaster */
fprintf(stderr, "Unknown format:%d\n", bench_format);
exit(1);
break;
}
return 0;
}
| linux-master | tools/perf/bench/sched-pipe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Benchmark of /proc/kallsyms parsing.
*
* Copyright 2020 Google LLC.
*/
#include <stdlib.h>
#include "bench.h"
#include "../util/stat.h"
#include <linux/time64.h>
#include <subcmd/parse-options.h>
#include <symbol/kallsyms.h>
static unsigned int iterations = 100;
static const struct option options[] = {
OPT_UINTEGER('i', "iterations", &iterations,
"Number of iterations used to compute average"),
OPT_END()
};
static const char *const bench_usage[] = {
"perf bench internals kallsyms-parse <options>",
NULL
};
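/*
 * Example: "perf bench internals kallsyms-parse -i 200" averages the parse
 * time over 200 iterations instead of the default 100.
 */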
static int bench_process_symbol(void *arg __maybe_unused,
const char *name __maybe_unused,
char type __maybe_unused,
u64 start __maybe_unused)
{
return 0;
}
static int do_kallsyms_parse(void)
{
struct timeval start, end, diff;
u64 runtime_us;
unsigned int i;
double time_average, time_stddev;
int err;
struct stats time_stats;
init_stats(&time_stats);
for (i = 0; i < iterations; i++) {
gettimeofday(&start, NULL);
err = kallsyms__parse("/proc/kallsyms", NULL,
bench_process_symbol);
if (err)
return err;
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
update_stats(&time_stats, runtime_us);
}
time_average = avg_stats(&time_stats) / USEC_PER_MSEC;
time_stddev = stddev_stats(&time_stats) / USEC_PER_MSEC;
printf(" Average kallsyms__parse took: %.3f ms (+- %.3f ms)\n",
time_average, time_stddev);
return 0;
}
int bench_kallsyms_parse(int argc, const char **argv)
{
argc = parse_options(argc, argv, options, bench_usage, 0);
if (argc) {
usage_with_options(bench_usage, options);
exit(EXIT_FAILURE);
}
return do_kallsyms_parse();
}
| linux-master | tools/perf/bench/kallsyms-parse.c |
// SPDX-License-Identifier: GPL-2.0
#include <subcmd/parse-options.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/time64.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include "bench.h"
#include "futex.h"
struct {
unsigned int nbreakpoints;
unsigned int nparallel;
unsigned int nthreads;
} thread_params = {
.nbreakpoints = 1,
.nparallel = 1,
.nthreads = 1,
};
static const struct option thread_options[] = {
OPT_UINTEGER('b', "breakpoints", &thread_params.nbreakpoints,
"Specify amount of breakpoints"),
OPT_UINTEGER('p', "parallelism", &thread_params.nparallel, "Specify amount of parallelism"),
OPT_UINTEGER('t', "threads", &thread_params.nthreads, "Specify amount of threads"),
OPT_END()
};
static const char * const thread_usage[] = {
"perf bench breakpoint thread <options>",
NULL
};
struct breakpoint {
int fd;
char watched;
};
static int breakpoint_setup(void *addr)
{
struct perf_event_attr attr = { .size = 0, };
int fd;
attr.type = PERF_TYPE_BREAKPOINT;
attr.size = sizeof(attr);
attr.inherit = 1;
attr.exclude_kernel = 1;
attr.exclude_hv = 1;
attr.bp_addr = (unsigned long)addr;
attr.bp_type = HW_BREAKPOINT_RW;
attr.bp_len = HW_BREAKPOINT_LEN_1;
fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
if (fd < 0)
fd = -errno;
return fd;
}
static void *passive_thread(void *arg)
{
unsigned int *done = (unsigned int *)arg;
while (!__atomic_load_n(done, __ATOMIC_RELAXED))
futex_wait(done, 0, NULL, 0);
return NULL;
}
static void *active_thread(void *arg)
{
unsigned int *done = (unsigned int *)arg;
while (!__atomic_load_n(done, __ATOMIC_RELAXED));
return NULL;
}
static void *breakpoint_thread(void *arg)
{
unsigned int i, done;
int *repeat = (int *)arg;
pthread_t *threads;
threads = calloc(thread_params.nthreads, sizeof(threads[0]));
if (!threads)
exit((perror("calloc"), EXIT_FAILURE));
while (__atomic_fetch_sub(repeat, 1, __ATOMIC_RELAXED) > 0) {
done = 0;
for (i = 0; i < thread_params.nthreads; i++) {
if (pthread_create(&threads[i], NULL, passive_thread, &done))
exit((perror("pthread_create"), EXIT_FAILURE));
}
__atomic_store_n(&done, 1, __ATOMIC_RELAXED);
futex_wake(&done, thread_params.nthreads, 0);
for (i = 0; i < thread_params.nthreads; i++)
pthread_join(threads[i], NULL);
}
free(threads);
return NULL;
}
// The benchmark creates nbreakpoints inheritable breakpoints,
// then starts nparallel threads which create and join bench_repeat batches of nthreads threads.
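// For example, "perf bench breakpoint thread -b 2 -p 4 -t 8" sets up
// 2 breakpoints and 4 parallel threads, each of which creates and joins
// bench_repeat batches of 8 threads.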
int bench_breakpoint_thread(int argc, const char **argv)
{
unsigned int i, result_usec;
int repeat = bench_repeat;
struct breakpoint *breakpoints;
pthread_t *parallel;
struct timeval start, stop, diff;
if (parse_options(argc, argv, thread_options, thread_usage, 0)) {
usage_with_options(thread_usage, thread_options);
exit(EXIT_FAILURE);
}
breakpoints = calloc(thread_params.nbreakpoints, sizeof(breakpoints[0]));
parallel = calloc(thread_params.nparallel, sizeof(parallel[0]));
if (!breakpoints || !parallel)
exit((perror("calloc"), EXIT_FAILURE));
for (i = 0; i < thread_params.nbreakpoints; i++) {
breakpoints[i].fd = breakpoint_setup(&breakpoints[i].watched);
if (breakpoints[i].fd < 0) {
if (breakpoints[i].fd == -ENODEV) {
printf("Skipping perf bench breakpoint thread: No hardware support\n");
return 0;
}
exit((perror("perf_event_open"), EXIT_FAILURE));
}
}
gettimeofday(&start, NULL);
for (i = 0; i < thread_params.nparallel; i++) {
if (pthread_create(¶llel[i], NULL, breakpoint_thread, &repeat))
exit((perror("pthread_create"), EXIT_FAILURE));
}
for (i = 0; i < thread_params.nparallel; i++)
pthread_join(parallel[i], NULL);
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
for (i = 0; i < thread_params.nbreakpoints; i++)
close(breakpoints[i].fd);
free(parallel);
free(breakpoints);
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
printf("# Created/joined %d threads with %d breakpoints and %d parallelism\n",
bench_repeat, thread_params.nbreakpoints, thread_params.nparallel);
printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
(long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
result_usec = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
printf(" %14lf usecs/op\n",
(double)result_usec / bench_repeat / thread_params.nthreads);
printf(" %14lf usecs/op/cpu\n",
(double)result_usec / bench_repeat /
thread_params.nthreads * thread_params.nparallel);
break;
case BENCH_FORMAT_SIMPLE:
printf("%lu.%03lu\n", (long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
break;
default:
fprintf(stderr, "Unknown format: %d\n", bench_format);
exit(EXIT_FAILURE);
}
return 0;
}
struct {
unsigned int npassive;
unsigned int nactive;
} enable_params = {
.nactive = 0,
.npassive = 0,
};
static const struct option enable_options[] = {
OPT_UINTEGER('p', "passive", &enable_params.npassive, "Specify amount of passive threads"),
OPT_UINTEGER('a', "active", &enable_params.nactive, "Specify amount of active threads"),
OPT_END()
};
static const char * const enable_usage[] = {
"perf bench breakpoint enable <options>",
NULL
};
// The benchmark creates an inheritable breakpoint,
// then starts npassive threads that block and nactive threads that actively spin
// and then disables and enables the breakpoint bench_repeat times.
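// For example, "perf bench breakpoint enable -p 2 -a 1" keeps 2 blocked
// and 1 spinning thread alive while the breakpoint is toggled bench_repeat
// times.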
int bench_breakpoint_enable(int argc, const char **argv)
{
unsigned int i, nthreads, result_usec, done = 0;
char watched;
int fd;
pthread_t *threads;
struct timeval start, stop, diff;
if (parse_options(argc, argv, enable_options, enable_usage, 0)) {
usage_with_options(enable_usage, enable_options);
exit(EXIT_FAILURE);
}
fd = breakpoint_setup(&watched);
if (fd < 0) {
if (fd == -ENODEV) {
printf("Skipping perf bench breakpoint enable: No hardware support\n");
return 0;
}
exit((perror("perf_event_open"), EXIT_FAILURE));
}
nthreads = enable_params.npassive + enable_params.nactive;
threads = calloc(nthreads, sizeof(threads[0]));
if (!threads)
exit((perror("calloc"), EXIT_FAILURE));
for (i = 0; i < nthreads; i++) {
if (pthread_create(&threads[i], NULL,
i < enable_params.npassive ? passive_thread : active_thread, &done))
exit((perror("pthread_create"), EXIT_FAILURE));
}
usleep(10000); // let the threads block
gettimeofday(&start, NULL);
for (i = 0; i < bench_repeat; i++) {
if (ioctl(fd, PERF_EVENT_IOC_DISABLE, 0))
exit((perror("ioctl(PERF_EVENT_IOC_DISABLE)"), EXIT_FAILURE));
if (ioctl(fd, PERF_EVENT_IOC_ENABLE, 0))
exit((perror("ioctl(PERF_EVENT_IOC_ENABLE)"), EXIT_FAILURE));
}
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
__atomic_store_n(&done, 1, __ATOMIC_RELAXED);
futex_wake(&done, enable_params.npassive, 0);
for (i = 0; i < nthreads; i++)
pthread_join(threads[i], NULL);
free(threads);
close(fd);
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
printf("# Enabled/disabled breakpoint %d time with %d passive and %d active threads\n",
bench_repeat, enable_params.npassive, enable_params.nactive);
printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
(long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
result_usec = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
printf(" %14lf usecs/op\n", (double)result_usec / bench_repeat);
break;
case BENCH_FORMAT_SIMPLE:
printf("%lu.%03lu\n", (long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
break;
default:
fprintf(stderr, "Unknown format: %d\n", bench_format);
exit(EXIT_FAILURE);
}
return 0;
}
| linux-master | tools/perf/bench/breakpoint.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Davidlohr Bueso.
*
* Block a bunch of threads and let parallel waker threads wakeup an
* equal amount of them. The program output reflects the avg latency
* for each individual thread to service its share of work. Ultimately
* it can be used to measure futex_wake() changes.
*/
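/*
 * Example, using the flags defined in options[] below: "perf bench futex
 * wake-parallel -t 512 -w 8" blocks 512 threads and then lets 8 waker
 * threads wake an equal share of them in parallel.
 */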
#include "bench.h"
#include <linux/compiler.h>
#include "../util/debug.h"
#include "../util/mutex.h"
#ifndef HAVE_PTHREAD_BARRIER
int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe_unused)
{
pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
return 0;
}
#else /* HAVE_PTHREAD_BARRIER */
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <signal.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
#include "futex.h"
#include <perf/cpumap.h>
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/mman.h>
struct thread_data {
pthread_t worker;
unsigned int nwoken;
struct timeval runtime;
};
static unsigned int nwakes = 1;
/* all threads will block on the same futex -- hash bucket chaos ;) */
static u_int32_t futex = 0;
static pthread_t *blocked_worker;
static bool done = false;
static struct mutex thread_lock;
static struct cond thread_parent, thread_worker;
static pthread_barrier_t barrier;
static struct stats waketime_stats, wakeup_stats;
static unsigned int threads_starting;
static int futex_flag = 0;
static struct bench_futex_parameters params;
static const struct option options[] = {
OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"),
OPT_UINTEGER('w', "nwakers", ¶ms.nwakes, "Specify amount of waking threads"),
OPT_BOOLEAN( 's', "silent", ¶ms.silent, "Silent mode: do not display data/details"),
OPT_BOOLEAN( 'S', "shared", ¶ms.fshared, "Use shared futexes instead of private ones"),
OPT_BOOLEAN( 'm', "mlockall", ¶ms.mlockall, "Lock all current and future memory"),
OPT_END()
};
static const char * const bench_futex_wake_parallel_usage[] = {
"perf bench futex wake-parallel <options>",
NULL
};
static void *waking_workerfn(void *arg)
{
struct thread_data *waker = (struct thread_data *) arg;
struct timeval start, end;
pthread_barrier_wait(&barrier);
gettimeofday(&start, NULL);
waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
if (waker->nwoken != nwakes)
warnx("couldn't wakeup all tasks (%d/%d)",
waker->nwoken, nwakes);
gettimeofday(&end, NULL);
timersub(&end, &start, &waker->runtime);
pthread_exit(NULL);
return NULL;
}
static void wakeup_threads(struct thread_data *td)
{
unsigned int i;
pthread_attr_t thread_attr;
pthread_attr_init(&thread_attr);
pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
pthread_barrier_init(&barrier, NULL, params.nwakes + 1);
/* create and block all threads */
for (i = 0; i < params.nwakes; i++) {
/*
		 * Thread creation order will impact per-thread latency,
		 * as it affects the order in which the hb spinlock is acquired.
* For now let the scheduler decide.
*/
if (pthread_create(&td[i].worker, &thread_attr,
waking_workerfn, (void *)&td[i]))
err(EXIT_FAILURE, "pthread_create");
}
pthread_barrier_wait(&barrier);
for (i = 0; i < params.nwakes; i++)
if (pthread_join(td[i].worker, NULL))
err(EXIT_FAILURE, "pthread_join");
pthread_barrier_destroy(&barrier);
pthread_attr_destroy(&thread_attr);
}
static void *blocked_workerfn(void *arg __maybe_unused)
{
mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
cond_signal(&thread_parent);
cond_wait(&thread_worker, &thread_lock);
mutex_unlock(&thread_lock);
while (1) { /* handle spurious wakeups */
if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
break;
}
pthread_exit(NULL);
return NULL;
}
static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
{
cpu_set_t *cpuset;
unsigned int i;
int nrcpus = perf_cpu_map__nr(cpu);
size_t size;
threads_starting = params.nthreads;
cpuset = CPU_ALLOC(nrcpus);
BUG_ON(!cpuset);
size = CPU_ALLOC_SIZE(nrcpus);
/* create and block all threads */
for (i = 0; i < params.nthreads; i++) {
pthread_attr_t thread_attr;
pthread_attr_init(&thread_attr);
CPU_ZERO_S(size, cpuset);
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
}
if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL)) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
}
pthread_attr_destroy(&thread_attr);
}
CPU_FREE(cpuset);
}
static void print_run(struct thread_data *waking_worker, unsigned int run_num)
{
unsigned int i, wakeup_avg;
double waketime_avg, waketime_stddev;
struct stats __waketime_stats, __wakeup_stats;
init_stats(&__wakeup_stats);
init_stats(&__waketime_stats);
for (i = 0; i < params.nwakes; i++) {
update_stats(&__waketime_stats, waking_worker[i].runtime.tv_usec);
update_stats(&__wakeup_stats, waking_worker[i].nwoken);
}
waketime_avg = avg_stats(&__waketime_stats);
waketime_stddev = stddev_stats(&__waketime_stats);
wakeup_avg = avg_stats(&__wakeup_stats);
printf("[Run %d]: Avg per-thread latency (waking %d/%d threads) "
"in %.4f ms (+-%.2f%%)\n", run_num + 1, wakeup_avg,
params.nthreads, waketime_avg / USEC_PER_MSEC,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
static void print_summary(void)
{
unsigned int wakeup_avg;
double waketime_avg, waketime_stddev;
waketime_avg = avg_stats(&waketime_stats);
waketime_stddev = stddev_stats(&waketime_stats);
wakeup_avg = avg_stats(&wakeup_stats);
printf("Avg per-thread latency (waking %d/%d threads) in %.4f ms (+-%.2f%%)\n",
wakeup_avg,
params.nthreads,
waketime_avg / USEC_PER_MSEC,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
static void do_run_stats(struct thread_data *waking_worker)
{
unsigned int i;
for (i = 0; i < params.nwakes; i++) {
update_stats(&waketime_stats, waking_worker[i].runtime.tv_usec);
update_stats(&wakeup_stats, waking_worker[i].nwoken);
}
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
done = true;
}
int bench_futex_wake_parallel(int argc, const char **argv)
{
int ret = 0;
unsigned int i, j;
struct sigaction act;
struct thread_data *waking_worker;
struct perf_cpu_map *cpu;
argc = parse_options(argc, argv, options,
bench_futex_wake_parallel_usage, 0);
if (argc) {
usage_with_options(bench_futex_wake_parallel_usage, options);
exit(EXIT_FAILURE);
}
memset(&act, 0, sizeof(act));
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (params.mlockall) {
if (mlockall(MCL_CURRENT | MCL_FUTURE))
err(EXIT_FAILURE, "mlockall");
}
cpu = perf_cpu_map__new(NULL);
if (!cpu)
err(EXIT_FAILURE, "calloc");
if (!params.nthreads)
params.nthreads = perf_cpu_map__nr(cpu);
/* some sanity checks */
if (params.nwakes > params.nthreads ||
!params.nwakes)
params.nwakes = params.nthreads;
if (params.nthreads % params.nwakes)
		errx(EXIT_FAILURE, "nthreads must be evenly divisible by nwakes");
	/*
	 * Each waking thread will wake up nwakes blocked tasks
	 * in a single futex_wake() call.
	 */
nwakes = params.nthreads/params.nwakes;
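	/* e.g. nthreads == 8 with 2 wakers (-w 2) -> each waker wakes 4 tasks */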
blocked_worker = calloc(params.nthreads, sizeof(*blocked_worker));
if (!blocked_worker)
err(EXIT_FAILURE, "calloc");
if (!params.fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
printf("Run summary [PID %d]: blocking on %d threads (at [%s] "
"futex %p), %d threads waking up %d at a time.\n\n",
getpid(), params.nthreads, params.fshared ? "shared":"private",
&futex, params.nwakes, nwakes);
init_stats(&wakeup_stats);
init_stats(&waketime_stats);
mutex_init(&thread_lock);
cond_init(&thread_parent);
cond_init(&thread_worker);
for (j = 0; j < bench_repeat && !done; j++) {
waking_worker = calloc(params.nwakes, sizeof(*waking_worker));
if (!waking_worker)
err(EXIT_FAILURE, "calloc");
/* create, launch & block all threads */
block_threads(blocked_worker, cpu);
/* make sure all threads are already blocked */
mutex_lock(&thread_lock);
while (threads_starting)
cond_wait(&thread_parent, &thread_lock);
cond_broadcast(&thread_worker);
mutex_unlock(&thread_lock);
usleep(100000);
/* Ok, all threads are patiently blocked, start waking folks up */
wakeup_threads(waking_worker);
for (i = 0; i < params.nthreads; i++) {
ret = pthread_join(blocked_worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
do_run_stats(waking_worker);
if (!params.silent)
print_run(waking_worker, j);
free(waking_worker);
}
/* cleanup & report results */
cond_destroy(&thread_parent);
cond_destroy(&thread_worker);
mutex_destroy(&thread_lock);
print_summary();
free(blocked_worker);
perf_cpu_map__put(cpu);
return ret;
}
#endif /* HAVE_PTHREAD_BARRIER */
| linux-master | tools/perf/bench/futex-wake-parallel.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Benchmark find_next_bit and related bit operations.
*
* Copyright 2020 Google LLC.
*/
#include <stdlib.h>
#include "bench.h"
#include "../util/stat.h"
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/time64.h>
#include <subcmd/parse-options.h>
static unsigned int outer_iterations = 5;
static unsigned int inner_iterations = 100000;
static const struct option options[] = {
OPT_UINTEGER('i', "outer-iterations", &outer_iterations,
"Number of outer iterations used"),
OPT_UINTEGER('j', "inner-iterations", &inner_iterations,
"Number of inner iterations used"),
OPT_END()
};
static const char *const bench_usage[] = {
"perf bench mem find_bit <options>",
NULL
};
static unsigned int accumulator;
static unsigned int use_of_val;
static noinline void workload(int val)
{
use_of_val += val;
accumulator++;
}
#if (defined(__i386__) || defined(__x86_64__)) && defined(__GCC_ASM_FLAG_OUTPUTS__)
static bool asm_test_bit(long nr, const unsigned long *addr)
{
bool oldbit;
asm volatile("bt %2,%1"
: "=@ccc" (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
return oldbit;
}
#else
#define asm_test_bit test_bit
#endif
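/*
 * The inline-asm 'bt' variant above is presumably used so the compiler
 * cannot fold or vectorize the bit test away, keeping the test_bit loop
 * below an honest per-bit measurement on x86.
 */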
static int do_for_each_set_bit(unsigned int num_bits)
{
unsigned long *to_test = bitmap_zalloc(num_bits);
struct timeval start, end, diff;
u64 runtime_us;
struct stats fb_time_stats, tb_time_stats;
double time_average, time_stddev;
unsigned int bit, i, j;
unsigned int set_bits, skip;
init_stats(&fb_time_stats);
init_stats(&tb_time_stats);
for (set_bits = 1; set_bits <= num_bits; set_bits <<= 1) {
bitmap_zero(to_test, num_bits);
skip = num_bits / set_bits;
for (i = 0; i < num_bits; i += skip)
__set_bit(i, to_test);
for (i = 0; i < outer_iterations; i++) {
#ifndef NDEBUG
unsigned int old = accumulator;
#endif
gettimeofday(&start, NULL);
for (j = 0; j < inner_iterations; j++) {
for_each_set_bit(bit, to_test, num_bits)
workload(bit);
}
gettimeofday(&end, NULL);
assert(old + (inner_iterations * set_bits) == accumulator);
timersub(&end, &start, &diff);
runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
update_stats(&fb_time_stats, runtime_us);
#ifndef NDEBUG
old = accumulator;
#endif
gettimeofday(&start, NULL);
for (j = 0; j < inner_iterations; j++) {
for (bit = 0; bit < num_bits; bit++) {
if (asm_test_bit(bit, to_test))
workload(bit);
}
}
gettimeofday(&end, NULL);
assert(old + (inner_iterations * set_bits) == accumulator);
timersub(&end, &start, &diff);
runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
update_stats(&tb_time_stats, runtime_us);
}
printf("%d operations %d bits set of %d bits\n",
inner_iterations, set_bits, num_bits);
time_average = avg_stats(&fb_time_stats);
time_stddev = stddev_stats(&fb_time_stats);
printf(" Average for_each_set_bit took: %.3f usec (+- %.3f usec)\n",
time_average, time_stddev);
time_average = avg_stats(&tb_time_stats);
time_stddev = stddev_stats(&tb_time_stats);
printf(" Average test_bit loop took: %.3f usec (+- %.3f usec)\n",
time_average, time_stddev);
if (use_of_val == accumulator) /* Try to avoid compiler tricks. */
printf("\n");
}
bitmap_free(to_test);
return 0;
}
int bench_mem_find_bit(int argc, const char **argv)
{
int err = 0, i;
argc = parse_options(argc, argv, options, bench_usage, 0);
if (argc) {
usage_with_options(bench_usage, options);
exit(EXIT_FAILURE);
}
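	/* Sweep bitmap sizes over powers of two: 1, 2, 4, ... 2048 bits. */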
for (i = 1; i <= 2048; i <<= 1)
do_for_each_set_bit(i);
return err;
}
| linux-master | tools/perf/bench/find-bit-bench.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include "bench.h"
#include "../util/debug.h"
#include "../util/stat.h"
#include "../util/evlist.h"
#include "../util/evsel.h"
#include "../util/strbuf.h"
#include "../util/record.h"
#include "../util/parse-events.h"
#include "internal/threadmap.h"
#include "internal/cpumap.h"
#include <linux/perf_event.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <linux/string.h>
#include <subcmd/parse-options.h>
#define MMAP_FLUSH_DEFAULT 1
static int iterations = 100;
static int nr_events = 1;
static const char *event_string = "dummy";
static inline u64 timeval2usec(struct timeval *tv)
{
return tv->tv_sec * USEC_PER_SEC + tv->tv_usec;
}
static struct record_opts opts = {
.sample_time = true,
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.freq = 4000,
.target = {
.uses_mmap = true,
.default_per_cpu = true,
},
.mmap_flush = MMAP_FLUSH_DEFAULT,
.nr_threads_synthesize = 1,
.ctl_fd = -1,
.ctl_fd_ack = -1,
};
static const struct option options[] = {
OPT_STRING('e', "event", &event_string, "event", "event selector. use 'perf list' to list available events"),
OPT_INTEGER('n', "nr-events", &nr_events,
"number of dummy events to create (default 1). If used with -e, it clones those events n times (1 = no change)"),
OPT_INTEGER('i', "iterations", &iterations, "Number of iterations used to compute average (default=100)"),
OPT_BOOLEAN('a', "all-cpus", &opts.target.system_wide, "system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &opts.target.cpu_list, "cpu", "list of cpus where to open events"),
OPT_STRING('p', "pid", &opts.target.pid, "pid", "record events on existing process id"),
OPT_STRING('t', "tid", &opts.target.tid, "tid", "record events on existing thread id"),
OPT_STRING('u', "uid", &opts.target.uid_str, "user", "user to profile"),
OPT_BOOLEAN(0, "per-thread", &opts.target.per_thread, "use per-thread mmaps"),
OPT_END()
};
static const char *const bench_usage[] = {
"perf bench internals evlist-open-close <options>",
NULL
};
static int evlist__count_evsel_fds(struct evlist *evlist)
{
struct evsel *evsel;
int cnt = 0;
evlist__for_each_entry(evlist, evsel)
cnt += evsel->core.threads->nr * perf_cpu_map__nr(evsel->core.cpus);
return cnt;
}
static struct evlist *bench__create_evlist(char *evstr)
{
struct parse_events_error err;
struct evlist *evlist = evlist__new();
int ret;
if (!evlist) {
pr_err("Not enough memory to create evlist\n");
return NULL;
}
parse_events_error__init(&err);
ret = parse_events(evlist, evstr, &err);
if (ret) {
parse_events_error__print(&err, evstr);
parse_events_error__exit(&err);
pr_err("Run 'perf list' for a list of valid events\n");
ret = 1;
goto out_delete_evlist;
}
parse_events_error__exit(&err);
ret = evlist__create_maps(evlist, &opts.target);
if (ret < 0) {
pr_err("Not enough memory to create thread/cpu maps\n");
goto out_delete_evlist;
}
evlist__config(evlist, &opts, NULL);
return evlist;
out_delete_evlist:
evlist__delete(evlist);
return NULL;
}
static int bench__do_evlist_open_close(struct evlist *evlist)
{
char sbuf[STRERR_BUFSIZE];
int err = evlist__open(evlist);
if (err < 0) {
pr_err("evlist__open: %s\n", str_error_r(errno, sbuf, sizeof(sbuf)));
return err;
}
err = evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
pr_err("evlist__mmap: %s\n", str_error_r(errno, sbuf, sizeof(sbuf)));
return err;
}
evlist__enable(evlist);
evlist__disable(evlist);
evlist__munmap(evlist);
evlist__close(evlist);
return 0;
}
static int bench_evlist_open_close__run(char *evstr)
{
// used to print statistics only
struct evlist *evlist = bench__create_evlist(evstr);
double time_average, time_stddev;
struct timeval start, end, diff;
struct stats time_stats;
u64 runtime_us;
int i, err;
if (!evlist)
return -ENOMEM;
init_stats(&time_stats);
printf(" Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.user_requested_cpus));
printf(" Number of threads:\t%d\n", evlist->core.threads->nr);
printf(" Number of events:\t%d (%d fds)\n",
evlist->core.nr_entries, evlist__count_evsel_fds(evlist));
printf(" Number of iterations:\t%d\n", iterations);
evlist__delete(evlist);
for (i = 0; i < iterations; i++) {
pr_debug("Started iteration %d\n", i);
evlist = bench__create_evlist(evstr);
if (!evlist)
return -ENOMEM;
gettimeofday(&start, NULL);
err = bench__do_evlist_open_close(evlist);
if (err) {
evlist__delete(evlist);
return err;
}
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
runtime_us = timeval2usec(&diff);
update_stats(&time_stats, runtime_us);
evlist__delete(evlist);
pr_debug("Iteration %d took:\t%" PRIu64 "us\n", i, runtime_us);
}
time_average = avg_stats(&time_stats);
time_stddev = stddev_stats(&time_stats);
printf(" Average open-close took: %.3f usec (+- %.3f usec)\n", time_average, time_stddev);
return 0;
}
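/*
 * Build a comma-separated event string with n copies of evstr, e.g.
 * ("dummy", 3) -> "dummy,dummy,dummy", suitable for parse_events().
 */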
static char *bench__repeat_event_string(const char *evstr, int n)
{
char sbuf[STRERR_BUFSIZE];
struct strbuf buf;
int i, str_size = strlen(evstr),
final_size = str_size * n + n,
err = strbuf_init(&buf, final_size);
if (err) {
pr_err("strbuf_init: %s\n", str_error_r(err, sbuf, sizeof(sbuf)));
goto out_error;
}
for (i = 0; i < n; i++) {
err = strbuf_add(&buf, evstr, str_size);
if (err) {
pr_err("strbuf_add: %s\n", str_error_r(err, sbuf, sizeof(sbuf)));
goto out_error;
}
err = strbuf_addch(&buf, i == n-1 ? '\0' : ',');
if (err) {
pr_err("strbuf_addch: %s\n", str_error_r(err, sbuf, sizeof(sbuf)));
goto out_error;
}
}
return strbuf_detach(&buf, NULL);
out_error:
strbuf_release(&buf);
return NULL;
}
int bench_evlist_open_close(int argc, const char **argv)
{
char *evstr, errbuf[BUFSIZ];
int err;
argc = parse_options(argc, argv, options, bench_usage, 0);
if (argc) {
usage_with_options(bench_usage, options);
exit(EXIT_FAILURE);
}
err = target__validate(&opts.target);
if (err) {
target__strerror(&opts.target, err, errbuf, sizeof(errbuf));
pr_err("%s\n", errbuf);
goto out;
}
err = target__parse_uid(&opts.target);
if (err) {
target__strerror(&opts.target, err, errbuf, sizeof(errbuf));
pr_err("%s", errbuf);
goto out;
}
/* Enable ignoring missing threads when -u/-p option is defined. */
opts.ignore_missing_thread = opts.target.uid != UINT_MAX || opts.target.pid;
evstr = bench__repeat_event_string(event_string, nr_events);
if (!evstr) {
err = -ENOMEM;
goto out;
}
err = bench_evlist_open_close__run(evstr);
free(evstr);
out:
return err;
}
| linux-master | tools/perf/bench/evlist-open-close.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Davidlohr Bueso.
*
* Benchmark the various operations allowed for epoll_ctl(2).
* The idea is to concurrently stress a single epoll instance
*/
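/*
 * Illustrative invocation (flags per the options table below):
 *
 *   perf bench epoll ctl -t 4 -f 64 -r 8
 *
 * runs 4 threads, each cycling ADD/MOD/DEL ops over 64 eventfds against
 * one shared epoll instance for 8 seconds.
 */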
#ifdef HAVE_EVENTFD_SUPPORT
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <perf/cpumap.h>
#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
#include <err.h>
#define printinfo(fmt, arg...) \
do { if (__verbose) printf(fmt, ## arg); } while (0)
static unsigned int nthreads = 0;
static unsigned int nsecs = 8;
static bool done, __verbose, randomize;
/*
* epoll related shared variables.
*/
/* Maximum number of nesting allowed inside epoll sets */
#define EPOLL_MAXNESTS 4
enum {
OP_EPOLL_ADD,
OP_EPOLL_MOD,
OP_EPOLL_DEL,
EPOLL_NR_OPS,
};
static int epollfd;
static int *epollfdp;
static bool noaffinity;
static unsigned int nested = 0;
/* amount of fds to monitor, per thread */
static unsigned int nfds = 64;
static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats all_stats[EPOLL_NR_OPS];
static struct cond thread_parent, thread_worker;
struct worker {
int tid;
pthread_t thread;
unsigned long ops[EPOLL_NR_OPS];
int *fdmap;
};
static const struct option options[] = {
OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
OPT_UINTEGER('f', "nfds", &nfds, "Specify amount of file descriptors to monitor for each thread"),
OPT_BOOLEAN( 'n', "noaffinity", &noaffinity, "Disables CPU affinity"),
OPT_UINTEGER( 'N', "nested", &nested, "Nesting level epoll hierarchy (default is 0, no nesting)"),
OPT_BOOLEAN( 'R', "randomize", &randomize, "Perform random operations on random fds"),
OPT_BOOLEAN( 'v', "verbose", &__verbose, "Verbose mode"),
OPT_END()
};
static const char * const bench_epoll_ctl_usage[] = {
"perf bench epoll ctl <options>",
NULL
};
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
/* inform all threads that we're done for the day */
done = true;
gettimeofday(&bench__end, NULL);
timersub(&bench__end, &bench__start, &bench__runtime);
}
static void nest_epollfd(void)
{
unsigned int i;
struct epoll_event ev;
if (nested > EPOLL_MAXNESTS)
nested = EPOLL_MAXNESTS;
printinfo("Nesting level(s): %d\n", nested);
epollfdp = calloc(nested, sizeof(int));
if (!epollfdp)
err(EXIT_FAILURE, "calloc");
for (i = 0; i < nested; i++) {
epollfdp[i] = epoll_create(1);
		if (epollfdp[i] < 0)
err(EXIT_FAILURE, "epoll_create");
}
ev.events = EPOLLHUP; /* anything */
ev.data.u64 = i; /* any number */
for (i = nested - 1; i; i--) {
if (epoll_ctl(epollfdp[i - 1], EPOLL_CTL_ADD,
epollfdp[i], &ev) < 0)
err(EXIT_FAILURE, "epoll_ctl");
}
if (epoll_ctl(epollfd, EPOLL_CTL_ADD, *epollfdp, &ev) < 0)
err(EXIT_FAILURE, "epoll_ctl");
}
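/*
 * Perform one epoll_ctl(2) op against the shared epoll instance and
 * count it on success; failures (e.g. MOD/DEL on an fd that is not
 * currently registered) are simply not counted.
 */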
static inline void do_epoll_op(struct worker *w, int op, int fd)
{
int error;
struct epoll_event ev;
ev.events = EPOLLIN;
ev.data.u64 = fd;
switch (op) {
case OP_EPOLL_ADD:
error = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
break;
case OP_EPOLL_MOD:
ev.events = EPOLLOUT;
error = epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &ev);
break;
case OP_EPOLL_DEL:
error = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, NULL);
break;
default:
error = 1;
break;
}
if (!error)
w->ops[op]++;
}
static inline void do_random_epoll_op(struct worker *w)
{
unsigned long rnd1 = random(), rnd2 = random();
int op, fd;
fd = w->fdmap[rnd1 % nfds];
op = rnd2 % EPOLL_NR_OPS;
do_epoll_op(w, op, fd);
}
static void *workerfn(void *arg)
{
unsigned int i;
struct worker *w = (struct worker *) arg;
struct timespec ts = { .tv_sec = 0,
.tv_nsec = 250 };
mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
cond_signal(&thread_parent);
cond_wait(&thread_worker, &thread_lock);
mutex_unlock(&thread_lock);
/* Let 'em loose */
do {
/* random */
if (randomize) {
do_random_epoll_op(w);
} else {
for (i = 0; i < nfds; i++) {
do_epoll_op(w, OP_EPOLL_ADD, w->fdmap[i]);
do_epoll_op(w, OP_EPOLL_MOD, w->fdmap[i]);
do_epoll_op(w, OP_EPOLL_DEL, w->fdmap[i]);
}
}
nanosleep(&ts, NULL);
} while (!done);
return NULL;
}
static void init_fdmaps(struct worker *w, int pct)
{
unsigned int i;
int inc;
struct epoll_event ev;
if (!pct)
return;
inc = 100/pct;
for (i = 0; i < nfds; i+=inc) {
ev.data.fd = w->fdmap[i];
ev.events = EPOLLIN;
if (epoll_ctl(epollfd, EPOLL_CTL_ADD, w->fdmap[i], &ev) < 0)
err(EXIT_FAILURE, "epoll_ct");
}
}
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t *cpuset;
unsigned int i, j;
int ret = 0;
int nrcpus;
size_t size;
if (!noaffinity)
pthread_attr_init(&thread_attr);
nrcpus = perf_cpu_map__nr(cpu);
cpuset = CPU_ALLOC(nrcpus);
BUG_ON(!cpuset);
size = CPU_ALLOC_SIZE(nrcpus);
for (i = 0; i < nthreads; i++) {
struct worker *w = &worker[i];
w->tid = i;
w->fdmap = calloc(nfds, sizeof(int));
if (!w->fdmap)
return 1;
for (j = 0; j < nfds; j++) {
w->fdmap[j] = eventfd(0, EFD_NONBLOCK);
if (w->fdmap[j] < 0)
err(EXIT_FAILURE, "eventfd");
}
/*
		 * Let's add 50% of the fdmap to the epoll instance, and
		 * do it before any threads are started; otherwise there is
		 * an initial bias towards the mod and del ops failing.
*/
if (randomize)
init_fdmaps(w, 50);
if (!noaffinity) {
CPU_ZERO_S(size, cpuset);
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
size, cpuset);
ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
if (ret) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
}
attrp = &thread_attr;
}
		ret = pthread_create(&w->thread, attrp, workerfn, (void *)w);
if (ret) {
CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
}
}
CPU_FREE(cpuset);
if (!noaffinity)
pthread_attr_destroy(&thread_attr);
return ret;
}
static void print_summary(void)
{
int i;
unsigned long avg[EPOLL_NR_OPS];
double stddev[EPOLL_NR_OPS];
for (i = 0; i < EPOLL_NR_OPS; i++) {
avg[i] = avg_stats(&all_stats[i]);
stddev[i] = stddev_stats(&all_stats[i]);
}
printf("\nAveraged %ld ADD operations (+- %.2f%%)\n",
avg[OP_EPOLL_ADD], rel_stddev_stats(stddev[OP_EPOLL_ADD],
avg[OP_EPOLL_ADD]));
printf("Averaged %ld MOD operations (+- %.2f%%)\n",
avg[OP_EPOLL_MOD], rel_stddev_stats(stddev[OP_EPOLL_MOD],
avg[OP_EPOLL_MOD]));
printf("Averaged %ld DEL operations (+- %.2f%%)\n",
avg[OP_EPOLL_DEL], rel_stddev_stats(stddev[OP_EPOLL_DEL],
avg[OP_EPOLL_DEL]));
}
int bench_epoll_ctl(int argc, const char **argv)
{
int j, ret = 0;
struct sigaction act;
struct worker *worker = NULL;
struct perf_cpu_map *cpu;
struct rlimit rl, prevrl;
unsigned int i;
argc = parse_options(argc, argv, options, bench_epoll_ctl_usage, 0);
if (argc) {
usage_with_options(bench_epoll_ctl_usage, options);
exit(EXIT_FAILURE);
}
memset(&act, 0, sizeof(act));
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
cpu = perf_cpu_map__new(NULL);
if (!cpu)
goto errmem;
/* a single, main epoll instance */
epollfd = epoll_create(1);
if (epollfd < 0)
err(EXIT_FAILURE, "epoll_create");
/*
* Deal with nested epolls, if any.
*/
if (nested)
nest_epollfd();
/* default to the number of CPUs */
if (!nthreads)
nthreads = perf_cpu_map__nr(cpu);
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
goto errmem;
if (getrlimit(RLIMIT_NOFILE, &prevrl))
err(EXIT_FAILURE, "getrlimit");
rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
printinfo("Setting RLIMIT_NOFILE rlimit from %" PRIu64 " to: %" PRIu64 "\n",
(uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
err(EXIT_FAILURE, "setrlimit");
printf("Run summary [PID %d]: %d threads doing epoll_ctl ops "
"%d file-descriptors for %d secs.\n\n",
getpid(), nthreads, nfds, nsecs);
for (i = 0; i < EPOLL_NR_OPS; i++)
init_stats(&all_stats[i]);
mutex_init(&thread_lock);
cond_init(&thread_parent);
cond_init(&thread_worker);
threads_starting = nthreads;
gettimeofday(&bench__start, NULL);
do_threads(worker, cpu);
mutex_lock(&thread_lock);
while (threads_starting)
cond_wait(&thread_parent, &thread_lock);
cond_broadcast(&thread_worker);
mutex_unlock(&thread_lock);
sleep(nsecs);
toggle_done(0, NULL, NULL);
printinfo("main thread: toggling done\n");
for (i = 0; i < nthreads; i++) {
ret = pthread_join(worker[i].thread, NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
/* cleanup & report results */
cond_destroy(&thread_parent);
cond_destroy(&thread_worker);
mutex_destroy(&thread_lock);
for (i = 0; i < nthreads; i++) {
unsigned long t[EPOLL_NR_OPS];
for (j = 0; j < EPOLL_NR_OPS; j++) {
t[j] = worker[i].ops[j];
update_stats(&all_stats[j], t[j]);
}
if (nfds == 1)
printf("[thread %2d] fdmap: %p [ add: %04ld; mod: %04ld; del: %04lds ops ]\n",
worker[i].tid, &worker[i].fdmap[0],
t[OP_EPOLL_ADD], t[OP_EPOLL_MOD], t[OP_EPOLL_DEL]);
else
printf("[thread %2d] fdmap: %p ... %p [ add: %04ld ops; mod: %04ld ops; del: %04ld ops ]\n",
worker[i].tid, &worker[i].fdmap[0],
&worker[i].fdmap[nfds-1],
t[OP_EPOLL_ADD], t[OP_EPOLL_MOD], t[OP_EPOLL_DEL]);
}
print_summary();
close(epollfd);
perf_cpu_map__put(cpu);
for (i = 0; i < nthreads; i++)
free(worker[i].fdmap);
free(worker);
return ret;
errmem:
err(EXIT_FAILURE, "calloc");
}
#endif // HAVE_EVENTFD_SUPPORT
| linux-master | tools/perf/bench/epoll-ctl.c |
// SPDX-License-Identifier: LGPL-2.1
// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
#include "trace/beauty/beauty.h"
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <arpa/inet.h>
#include "trace/beauty/generated/sockaddr.c"
DEFINE_STRARRAY(socket_families, "PF_");
static size_t af_inet__scnprintf(struct sockaddr *sa, char *bf, size_t size)
{
struct sockaddr_in *sin = (struct sockaddr_in *)sa;
char tmp[16];
return scnprintf(bf, size, ", port: %d, addr: %s", ntohs(sin->sin_port),
inet_ntop(sin->sin_family, &sin->sin_addr, tmp, sizeof(tmp)));
}
static size_t af_inet6__scnprintf(struct sockaddr *sa, char *bf, size_t size)
{
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
u32 flowinfo = ntohl(sin6->sin6_flowinfo);
char tmp[512];
size_t printed = scnprintf(bf, size, ", port: %d, addr: %s", ntohs(sin6->sin6_port),
inet_ntop(sin6->sin6_family, &sin6->sin6_addr, tmp, sizeof(tmp)));
if (flowinfo != 0)
printed += scnprintf(bf + printed, size - printed, ", flowinfo: %lu", flowinfo);
if (sin6->sin6_scope_id != 0)
printed += scnprintf(bf + printed, size - printed, ", scope_id: %lu", sin6->sin6_scope_id);
return printed;
}
static size_t af_local__scnprintf(struct sockaddr *sa, char *bf, size_t size)
{
struct sockaddr_un *sun = (struct sockaddr_un *)sa;
return scnprintf(bf, size, ", path: %s", sun->sun_path);
}
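/*
 * Per-family pretty-printers, indexed by sa_family; families without an
 * entry fall back to printing just the family name.
 */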
static size_t (*af_scnprintfs[])(struct sockaddr *sa, char *bf, size_t size) = {
[AF_LOCAL] = af_local__scnprintf,
[AF_INET] = af_inet__scnprintf,
[AF_INET6] = af_inet6__scnprintf,
};
static size_t syscall_arg__scnprintf_augmented_sockaddr(struct syscall_arg *arg, char *bf, size_t size)
{
struct sockaddr *sa = (struct sockaddr *)arg->augmented.args;
char family[32];
size_t printed;
strarray__scnprintf(&strarray__socket_families, family, sizeof(family), "%d", arg->show_string_prefix, sa->sa_family);
printed = scnprintf(bf, size, "{ .family: %s", family);
if (sa->sa_family < ARRAY_SIZE(af_scnprintfs) && af_scnprintfs[sa->sa_family])
printed += af_scnprintfs[sa->sa_family](sa, bf + printed, size - printed);
return printed + scnprintf(bf + printed, size - printed, " }");
}
size_t syscall_arg__scnprintf_sockaddr(char *bf, size_t size, struct syscall_arg *arg)
{
if (arg->augmented.args)
return syscall_arg__scnprintf_augmented_sockaddr(arg, bf, size);
return scnprintf(bf, size, "%#lx", arg->val);
}
| linux-master | tools/perf/trace/beauty/sockaddr.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/prctl.c
*
* Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/kernel.h>
#include <uapi/linux/prctl.h>
#include "trace/beauty/generated/prctl_option_array.c"
DEFINE_STRARRAY(prctl_options, "PR_");
static size_t prctl__scnprintf_option(int option, char *bf, size_t size, bool show_prefix)
{
return strarray__scnprintf(&strarray__prctl_options, bf, size, "%d", show_prefix, option);
}
static size_t prctl__scnprintf_set_mm(int option, char *bf, size_t size, bool show_prefix)
{
static DEFINE_STRARRAY(prctl_set_mm_options, "PR_SET_MM_");
return strarray__scnprintf(&strarray__prctl_set_mm_options, bf, size, "%d", show_prefix, option);
}
size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_arg *arg)
{
int option = syscall_arg__val(arg, 0);
if (option == PR_SET_MM)
return prctl__scnprintf_set_mm(arg->val, bf, size, arg->show_string_prefix);
/*
* We still don't grab the contents of pointers on entry or exit,
* so just print them as hex numbers
*/
if (option == PR_SET_NAME)
return syscall_arg__scnprintf_hex(bf, size, arg);
return syscall_arg__scnprintf_long(bf, size, arg);
}
size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg)
{
int option = syscall_arg__val(arg, 0);
if (option == PR_SET_MM)
return syscall_arg__scnprintf_hex(bf, size, arg);
return syscall_arg__scnprintf_long(bf, size, arg);
}
size_t syscall_arg__scnprintf_prctl_option(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long option = arg->val;
enum {
SPO_ARG2 = (1 << 1),
SPO_ARG3 = (1 << 2),
SPO_ARG4 = (1 << 3),
SPO_ARG5 = (1 << 4),
SPO_ARG6 = (1 << 5),
};
const u8 all_but2 = SPO_ARG3 | SPO_ARG4 | SPO_ARG5 | SPO_ARG6;
const u8 all = SPO_ARG2 | all_but2;
const u8 masks[] = {
[PR_GET_DUMPABLE] = all,
[PR_SET_DUMPABLE] = all_but2,
[PR_SET_NAME] = all_but2,
[PR_GET_CHILD_SUBREAPER] = all_but2,
[PR_SET_CHILD_SUBREAPER] = all_but2,
[PR_GET_SECUREBITS] = all,
[PR_SET_SECUREBITS] = all_but2,
[PR_SET_MM] = SPO_ARG4 | SPO_ARG5 | SPO_ARG6,
[PR_GET_PDEATHSIG] = all,
[PR_SET_PDEATHSIG] = all_but2,
};
if (option < ARRAY_SIZE(masks))
arg->mask |= masks[option];
return prctl__scnprintf_option(option, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/prctl.c |
#include "trace/beauty/generated/arch_errno_name_array.c"
| linux-master | tools/perf/trace/beauty/arch_errno_names.c |
// SPDX-License-Identifier: LGPL-2.1
#include <sys/types.h>
#include <sys/socket.h>
#ifndef MSG_PROBE
#define MSG_PROBE 0x10
#endif
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif
#ifndef MSG_BATCH
#define MSG_BATCH 0x40000
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif
#ifndef MSG_SPLICE_PAGES
#define MSG_SPLICE_PAGES 0x8000000
#endif
#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif
#ifndef MSG_CMSG_CLOEXEC
# define MSG_CMSG_CLOEXEC 0x40000000
#endif
static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "MSG_";
int printed = 0, flags = arg->val;
if (flags == 0)
return scnprintf(bf, size, "NONE");
#define P_MSG_FLAG(n) \
if (flags & MSG_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~MSG_##n; \
}
P_MSG_FLAG(OOB);
P_MSG_FLAG(PEEK);
P_MSG_FLAG(DONTROUTE);
P_MSG_FLAG(CTRUNC);
P_MSG_FLAG(PROBE);
P_MSG_FLAG(TRUNC);
P_MSG_FLAG(DONTWAIT);
P_MSG_FLAG(EOR);
P_MSG_FLAG(WAITALL);
P_MSG_FLAG(FIN);
P_MSG_FLAG(SYN);
P_MSG_FLAG(CONFIRM);
P_MSG_FLAG(RST);
P_MSG_FLAG(ERRQUEUE);
P_MSG_FLAG(NOSIGNAL);
P_MSG_FLAG(MORE);
P_MSG_FLAG(WAITFORONE);
P_MSG_FLAG(BATCH);
P_MSG_FLAG(ZEROCOPY);
P_MSG_FLAG(SPLICE_PAGES);
P_MSG_FLAG(FASTOPEN);
P_MSG_FLAG(CMSG_CLOEXEC);
#undef P_MSG_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
return printed;
}
#define SCA_MSG_FLAGS syscall_arg__scnprintf_msg_flags
| linux-master | tools/perf/trace/beauty/msg_flags.c |
// SPDX-License-Identifier: LGPL-2.1
#include "trace/beauty/beauty.h"
#include <linux/kernel.h>
#include <uapi/linux/fcntl.h>
#ifndef LOCK_MAND
#define LOCK_MAND 32
#endif
#ifndef LOCK_READ
#define LOCK_READ 64
#endif
#ifndef LOCK_WRITE
#define LOCK_WRITE 128
#endif
#ifndef LOCK_RW
#define LOCK_RW 192
#endif
size_t syscall_arg__scnprintf_flock(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "LOCK_";
int printed = 0, op = arg->val;
if (op == 0)
return scnprintf(bf, size, "NONE");
#define P_CMD(cmd) \
if ((op & LOCK_##cmd) == LOCK_##cmd) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #cmd); \
op &= ~LOCK_##cmd; \
}
P_CMD(SH);
P_CMD(EX);
P_CMD(NB);
P_CMD(UN);
P_CMD(MAND);
P_CMD(RW);
P_CMD(READ);
P_CMD(WRITE);
#undef P_CMD
if (op)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", op);
return printed;
}
| linux-master | tools/perf/trace/beauty/flock.c |
// SPDX-License-Identifier: LGPL-2.1
#include <sched.h>
/*
 * Not defined in the UAPI headers; used to split the policy from the
 * flag bits so that any future flags are still caught.
*/
#define SCHED_POLICY_MASK 0xff
#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif
#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK 0x40000000
#endif
static size_t syscall_arg__scnprintf_sched_policy(char *bf, size_t size,
struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "SCHED_";
const char *policies[] = {
"NORMAL", "FIFO", "RR", "BATCH", "ISO", "IDLE", "DEADLINE",
};
size_t printed;
int policy = arg->val,
flags = policy & ~SCHED_POLICY_MASK;
policy &= SCHED_POLICY_MASK;
if (policy <= SCHED_DEADLINE)
printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", policies[policy]);
else
printed = scnprintf(bf, size, "%#x", policy);
#define P_POLICY_FLAG(n) \
if (flags & SCHED_##n) { \
printed += scnprintf(bf + printed, size - printed, "|%s%s", show_prefix ? prefix : "", #n); \
flags &= ~SCHED_##n; \
}
P_POLICY_FLAG(RESET_ON_FORK);
#undef P_POLICY_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "|%#x", flags);
return printed;
}
#define SCA_SCHED_POLICY syscall_arg__scnprintf_sched_policy
| linux-master | tools/perf/trace/beauty/sched_policy.c |
// SPDX-License-Identifier: LGPL-2.1
// Copyright (C) 2022, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
#include "trace/beauty/beauty.h"
#include <inttypes.h>
#include <time.h>
static size_t syscall_arg__scnprintf_augmented_timespec(struct syscall_arg *arg, char *bf, size_t size)
{
struct timespec *ts = (struct timespec *)arg->augmented.args;
return scnprintf(bf, size, "{ .tv_sec: %" PRIu64 ", .tv_nsec: %" PRIu64 " }", ts->tv_sec, ts->tv_nsec);
}
size_t syscall_arg__scnprintf_timespec(char *bf, size_t size, struct syscall_arg *arg)
{
if (arg->augmented.args)
return syscall_arg__scnprintf_augmented_timespec(arg, bf, size);
return scnprintf(bf, size, "%#lx", arg->val);
}
| linux-master | tools/perf/trace/beauty/timespec.c |
// SPDX-License-Identifier: LGPL-2.1
#include <linux/log2.h>
#include "trace/beauty/generated/mmap_prot_array.c"
static DEFINE_STRARRAY(mmap_prot, "PROT_");
static size_t mmap__scnprintf_prot(unsigned long prot, char *bf, size_t size, bool show_prefix)
{
return strarray__scnprintf_flags(&strarray__mmap_prot, bf, size, show_prefix, prot);
}
static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long prot = arg->val;
if (prot == 0)
return scnprintf(bf, size, "%sNONE", arg->show_string_prefix ? strarray__mmap_prot.prefix : "");
return mmap__scnprintf_prot(prot, bf, size, arg->show_string_prefix);
}
#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
#include "trace/beauty/generated/mmap_flags_array.c"
static DEFINE_STRARRAY(mmap_flags, "MAP_");
static size_t mmap__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
return strarray__scnprintf_flags(&strarray__mmap_flags, bf, size, show_prefix, flags);
}
static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
unsigned long flags = arg->val;
if (flags & MAP_ANONYMOUS)
arg->mask |= (1 << 4) | (1 << 5); /* Mask 4th ('fd') and 5th ('offset') args, ignored */
return mmap__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
#include "trace/beauty/generated/mremap_flags_array.c"
static DEFINE_STRARRAY(mremap_flags, "MREMAP_");
static size_t mremap__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
return strarray__scnprintf_flags(&strarray__mremap_flags, bf, size, show_prefix, flags);
}
static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long flags = arg->val;
if (!(flags & MREMAP_FIXED))
arg->mask |= (1 << 5); /* Mask 5th ('new_address') args, ignored */
return mremap__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
static size_t madvise__scnprintf_behavior(int behavior, char *bf, size_t size)
{
#include "trace/beauty/generated/madvise_behavior_array.c"
static DEFINE_STRARRAY(madvise_advices, "MADV_");
if (behavior < strarray__madvise_advices.nr_entries && strarray__madvise_advices.entries[behavior] != NULL)
return scnprintf(bf, size, "MADV_%s", strarray__madvise_advices.entries[behavior]);
return scnprintf(bf, size, "%#", behavior);
}
static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
struct syscall_arg *arg)
{
return madvise__scnprintf_behavior(arg->val, bf, size);
}
#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
| linux-master | tools/perf/trace/beauty/mmap.c |
// SPDX-License-Identifier: LGPL-2.1
#ifndef PERF_FLAG_FD_NO_GROUP
# define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#endif
#ifndef PERF_FLAG_FD_OUTPUT
# define PERF_FLAG_FD_OUTPUT (1UL << 1)
#endif
#ifndef PERF_FLAG_PID_CGROUP
# define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#endif
#ifndef PERF_FLAG_FD_CLOEXEC
# define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
#endif
static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "PERF_";
int printed = 0, flags = arg->val;
if (flags == 0)
return 0;
#define P_FLAG(n) \
if (flags & PERF_FLAG_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~PERF_FLAG_##n; \
}
P_FLAG(FD_NO_GROUP);
P_FLAG(FD_OUTPUT);
P_FLAG(PID_CGROUP);
P_FLAG(FD_CLOEXEC);
#undef P_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
return printed;
}
#define SCA_PERF_FLAGS syscall_arg__scnprintf_perf_flags
struct attr_fprintf_args {
size_t size, printed;
char *bf;
bool first;
};
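/*
 * Callback for perf_event_attr__fprintf(): instead of writing to the
 * FILE, append "name: val" pairs to the buffer carried in priv so the
 * attr is rendered as one "{ ... }" string.
 */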
static int attr__fprintf(FILE *fp __maybe_unused, const char *name, const char *val, void *priv)
{
struct attr_fprintf_args *args = priv;
	size_t printed = scnprintf(args->bf + args->printed, args->size - args->printed, "%s%s: %s", args->first ? "" : ", ", name, val);
args->first = false;
args->printed += printed;
return printed;
}
static size_t perf_event_attr___scnprintf(struct perf_event_attr *attr, char *bf, size_t size, bool show_zeros __maybe_unused)
{
struct attr_fprintf_args args = {
.printed = scnprintf(bf, size, "{ "),
.size = size,
.first = true,
.bf = bf,
};
perf_event_attr__fprintf(stdout, attr, attr__fprintf, &args);
return args.printed + scnprintf(bf + args.printed, size - args.printed, " }");
}
static size_t syscall_arg__scnprintf_augmented_perf_event_attr(struct syscall_arg *arg, char *bf, size_t size)
{
return perf_event_attr___scnprintf((void *)arg->augmented.args, bf, size, arg->trace->show_zeros);
}
static size_t syscall_arg__scnprintf_perf_event_attr(char *bf, size_t size, struct syscall_arg *arg)
{
if (arg->augmented.args)
return syscall_arg__scnprintf_augmented_perf_event_attr(arg, bf, size);
return scnprintf(bf, size, "%#lx", arg->val);
}
#define SCA_PERF_ATTR syscall_arg__scnprintf_perf_event_attr
| linux-master | tools/perf/trace/beauty/perf_event_open.c |
// SPDX-License-Identifier: LGPL-2.1
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
/* From include/linux/stat.h */
#ifndef S_IRWXUGO
#define S_IRWXUGO (S_IRWXU|S_IRWXG|S_IRWXO)
#endif
#ifndef S_IALLUGO
#define S_IALLUGO (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
#endif
#ifndef S_IRUGO
#define S_IRUGO (S_IRUSR|S_IRGRP|S_IROTH)
#endif
#ifndef S_IWUGO
#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH)
#endif
#ifndef S_IXUGO
#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
#endif
static size_t syscall_arg__scnprintf_mode_t(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "S_";
int printed = 0, mode = arg->val;
#define P_MODE(n) \
if ((mode & S_##n) == S_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
mode &= ~S_##n; \
}
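	/*
	 * Composite masks (IALLUGO, IRWXUGO, ...) are checked before the
	 * individual bits they cover, so e.g. mode 0777 prints as
	 * "IRWXUGO" rather than nine separate permission bits.
	 */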
P_MODE(IALLUGO);
P_MODE(IRWXUGO);
P_MODE(IRUGO);
P_MODE(IWUGO);
P_MODE(IXUGO);
P_MODE(IFMT);
P_MODE(IFSOCK);
P_MODE(IFLNK);
P_MODE(IFREG);
P_MODE(IFBLK);
P_MODE(IFDIR);
P_MODE(IFCHR);
P_MODE(IFIFO);
P_MODE(ISUID);
P_MODE(ISGID);
P_MODE(ISVTX);
P_MODE(IRWXU);
P_MODE(IRUSR);
P_MODE(IWUSR);
P_MODE(IXUSR);
P_MODE(IRWXG);
P_MODE(IRGRP);
P_MODE(IWGRP);
P_MODE(IXGRP);
P_MODE(IRWXO);
P_MODE(IROTH);
P_MODE(IWOTH);
P_MODE(IXOTH);
#undef P_MODE
if (mode)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", mode);
return printed;
}
#define SCA_MODE_T syscall_arg__scnprintf_mode_t
| linux-master | tools/perf/trace/beauty/mode_t.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/pkey_alloc.c
*
* Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/kernel.h>
#include <linux/log2.h>
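/*
 * Render a flags word from a strarray: entries[0] names the zero value,
 * and entries[i] (i >= 1) names bit (1UL << (i - 1)); unnamed bits are
 * printed as raw hex.
 */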
size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, bool show_prefix, unsigned long flags)
{
int i, printed = 0;
if (flags == 0) {
const char *s = sa->entries[0];
if (s)
return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", s);
return scnprintf(bf, size, "%d", 0);
}
for (i = 1; i < sa->nr_entries; ++i) {
unsigned long bit = 1UL << (i - 1);
if (!(flags & bit))
continue;
if (printed != 0)
printed += scnprintf(bf + printed, size - printed, "|");
if (sa->entries[i] != NULL)
printed += scnprintf(bf + printed, size - printed, "%s%s", show_prefix ? sa->prefix : "", sa->entries[i]);
else
			printed += scnprintf(bf + printed, size - printed, "%#lx", bit);
}
return printed;
}
static size_t pkey_alloc__scnprintf_access_rights(int access_rights, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/pkey_alloc_access_rights_array.c"
static DEFINE_STRARRAY(pkey_alloc_access_rights, "PKEY_");
return strarray__scnprintf_flags(&strarray__pkey_alloc_access_rights, bf, size, show_prefix, access_rights);
}
size_t syscall_arg__scnprintf_pkey_alloc_access_rights(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long cmd = arg->val;
return pkey_alloc__scnprintf_access_rights(cmd, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/pkey_alloc.c |
// SPDX-License-Identifier: LGPL-2.1
size_t syscall_arg__scnprintf_pid(char *bf, size_t size, struct syscall_arg *arg)
{
int pid = arg->val;
struct trace *trace = arg->trace;
size_t printed = scnprintf(bf, size, "%d", pid);
struct thread *thread = machine__findnew_thread(trace->host, pid, pid);
if (thread != NULL) {
if (!thread__comm_set(thread))
thread__set_comm_from_proc(thread);
if (thread__comm_set(thread))
printed += scnprintf(bf + printed, size - printed,
" (%s)", thread__comm_str(thread));
thread__put(thread);
}
return printed;
}
| linux-master | tools/perf/trace/beauty/pid.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/fspick.c
*
* Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/log2.h>
static size_t fspick__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/fspick_arrays.c"
static DEFINE_STRARRAY(fspick_flags, "FSPICK_");
return strarray__scnprintf_flags(&strarray__fspick_flags, bf, size, show_prefix, flags);
}
size_t syscall_arg__scnprintf_fspick_flags(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long flags = arg->val;
return fspick__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/fspick.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/socket.c
*
* Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <sys/types.h>
#include <sys/socket.h>
#include "trace/beauty/generated/socket.c"
static size_t socket__scnprintf_ipproto(int protocol, char *bf, size_t size, bool show_prefix)
{
static DEFINE_STRARRAY(socket_ipproto, "IPPROTO_");
return strarray__scnprintf(&strarray__socket_ipproto, bf, size, "%d", show_prefix, protocol);
}
size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg)
{
int domain = syscall_arg__val(arg, 0);
if (domain == AF_INET || domain == AF_INET6)
return socket__scnprintf_ipproto(arg->val, bf, size, arg->show_string_prefix);
return syscall_arg__scnprintf_int(bf, size, arg);
}
static size_t socket__scnprintf_level(int level, char *bf, size_t size, bool show_prefix)
{
#if defined(__alpha__) || defined(__hppa__) || defined(__mips__) || defined(__sparc__)
const int sol_socket = 0xffff;
#else
const int sol_socket = 1;
#endif
if (level == sol_socket)
return scnprintf(bf, size, "%sSOCKET", show_prefix ? "SOL_" : "");
return strarray__scnprintf(&strarray__socket_level, bf, size, "%d", show_prefix, level);
}
size_t syscall_arg__scnprintf_socket_level(char *bf, size_t size, struct syscall_arg *arg)
{
return socket__scnprintf_level(arg->val, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/socket.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/move_mount.c
*
* Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/log2.h>
static size_t move_mount__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/move_mount_flags_array.c"
static DEFINE_STRARRAY(move_mount_flags, "MOVE_MOUNT_");
return strarray__scnprintf_flags(&strarray__move_mount_flags, bf, size, show_prefix, flags);
}
size_t syscall_arg__scnprintf_move_mount_flags(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long flags = arg->val;
return move_mount__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/move_mount.c |
// SPDX-License-Identifier: LGPL-2.1
#ifndef SECCOMP_SET_MODE_STRICT
#define SECCOMP_SET_MODE_STRICT 0
#endif
#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif
static size_t syscall_arg__scnprintf_seccomp_op(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "SECCOMP_SET_MODE_";
int op = arg->val;
size_t printed = 0;
switch (op) {
#define P_SECCOMP_SET_MODE_OP(n) case SECCOMP_SET_MODE_##n: printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n); break
P_SECCOMP_SET_MODE_OP(STRICT);
P_SECCOMP_SET_MODE_OP(FILTER);
#undef P_SECCOMP_SET_MODE_OP
default: printed = scnprintf(bf, size, "%#x", op); break;
}
return printed;
}
#define SCA_SECCOMP_OP syscall_arg__scnprintf_seccomp_op
#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC 1
#endif
static size_t syscall_arg__scnprintf_seccomp_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "SECCOMP_FILTER_FLAG_";
int printed = 0, flags = arg->val;
#define P_FLAG(n) \
if (flags & SECCOMP_FILTER_FLAG_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~SECCOMP_FILTER_FLAG_##n; \
}
P_FLAG(TSYNC);
#undef P_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
return printed;
}
#define SCA_SECCOMP_FLAGS syscall_arg__scnprintf_seccomp_flags
| linux-master | tools/perf/trace/beauty/seccomp.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/kcmp.c
*
* Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/kernel.h>
#include <sys/types.h>
#include <machine.h>
#include <uapi/linux/kcmp.h>
#include "trace/beauty/generated/kcmp_type_array.c"
size_t syscall_arg__scnprintf_kcmp_idx(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long fd = arg->val;
int type = syscall_arg__val(arg, 2);
pid_t pid;
if (type != KCMP_FILE)
return syscall_arg__scnprintf_long(bf, size, arg);
pid = syscall_arg__val(arg, arg->idx == 3 ? 0 : 1); /* idx1 -> pid1, idx2 -> pid2 */
return pid__scnprintf_fd(arg->trace, pid, fd, bf, size);
}
static size_t kcmp__scnprintf_type(int type, char *bf, size_t size, bool show_prefix)
{
static DEFINE_STRARRAY(kcmp_types, "KCMP_");
return strarray__scnprintf(&strarray__kcmp_types, bf, size, "%d", show_prefix, type);
}
size_t syscall_arg__scnprintf_kcmp_type(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long type = arg->val;
if (type != KCMP_FILE)
arg->mask |= (1 << 3) | (1 << 4); /* Ignore idx1 and idx2 */
return kcmp__scnprintf_type(type, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/kcmp.c |
// SPDX-License-Identifier: LGPL-2.1
#include <sys/types.h>
#include <sys/wait.h>
static size_t syscall_arg__scnprintf_waitid_options(char *bf, size_t size,
struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "W";
int printed = 0, options = arg->val;
#define P_OPTION(n) \
if (options & W##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
options &= ~W##n; \
}
P_OPTION(NOHANG);
P_OPTION(UNTRACED);
P_OPTION(CONTINUED);
#undef P_OPTION
if (options)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", options);
return printed;
}
#define SCA_WAITID_OPTIONS syscall_arg__scnprintf_waitid_options
| linux-master | tools/perf/trace/beauty/waitid_options.c |
// SPDX-License-Identifier: LGPL-2.1
#include <signal.h>
static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "SIG";
int sig = arg->val;
switch (sig) {
#define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n)
P_SIGNUM(HUP);
P_SIGNUM(INT);
P_SIGNUM(QUIT);
P_SIGNUM(ILL);
P_SIGNUM(TRAP);
P_SIGNUM(ABRT);
P_SIGNUM(BUS);
P_SIGNUM(FPE);
P_SIGNUM(KILL);
P_SIGNUM(USR1);
P_SIGNUM(SEGV);
P_SIGNUM(USR2);
P_SIGNUM(PIPE);
P_SIGNUM(ALRM);
P_SIGNUM(TERM);
P_SIGNUM(CHLD);
P_SIGNUM(CONT);
P_SIGNUM(STOP);
P_SIGNUM(TSTP);
P_SIGNUM(TTIN);
P_SIGNUM(TTOU);
P_SIGNUM(URG);
P_SIGNUM(XCPU);
P_SIGNUM(XFSZ);
P_SIGNUM(VTALRM);
P_SIGNUM(PROF);
P_SIGNUM(WINCH);
P_SIGNUM(IO);
P_SIGNUM(PWR);
P_SIGNUM(SYS);
#ifdef SIGEMT
P_SIGNUM(EMT);
#endif
#ifdef SIGSTKFLT
P_SIGNUM(STKFLT);
#endif
#ifdef SIGSWI
P_SIGNUM(SWI);
#endif
default: break;
}
return scnprintf(bf, size, "%#x", sig);
}
#define SCA_SIGNUM syscall_arg__scnprintf_signum
| linux-master | tools/perf/trace/beauty/signum.c |
// SPDX-License-Identifier: LGPL-2.1
#include <sys/types.h>
#include <sys/socket.h>
#ifndef SOCK_DCCP
# define SOCK_DCCP 6
#endif
#ifndef SOCK_CLOEXEC
# define SOCK_CLOEXEC 02000000
#endif
#ifndef SOCK_NONBLOCK
# define SOCK_NONBLOCK 00004000
#endif
#ifndef SOCK_TYPE_MASK
#define SOCK_TYPE_MASK 0xf
#endif
static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "SOCK_";
size_t printed;
int type = arg->val,
flags = type & ~SOCK_TYPE_MASK;
type &= SOCK_TYPE_MASK;
/*
* Can't use a strarray, MIPS may override for ABI reasons.
*/
switch (type) {
#define P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n); break;
P_SK_TYPE(STREAM);
P_SK_TYPE(DGRAM);
P_SK_TYPE(RAW);
P_SK_TYPE(RDM);
P_SK_TYPE(SEQPACKET);
P_SK_TYPE(DCCP);
P_SK_TYPE(PACKET);
#undef P_SK_TYPE
default:
printed = scnprintf(bf, size, "%#x", type);
}
#define P_SK_FLAG(n) \
if (flags & SOCK_##n) { \
printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
flags &= ~SOCK_##n; \
}
P_SK_FLAG(CLOEXEC);
P_SK_FLAG(NONBLOCK);
#undef P_SK_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "|%#x", flags);
return printed;
}
#define SCA_SK_TYPE syscall_arg__scnprintf_socket_type
| linux-master | tools/perf/trace/beauty/socket_type.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/ioctl.c
*
* Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/kernel.h>
/*
* FIXME: to support all arches we have to improve this, for
* now, to build on older systems without things like TIOCGEXCL,
* get it directly from our copy.
*
* Right now only x86 is being supported for beautifying ioctl args
* in 'perf trace', see tools/perf/trace/beauty/Build and builtin-trace.c
*/
#include <uapi/asm-generic/ioctls.h>
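/*
 * The table below relies on C designated initializers: each
 * [_IOC_NR(x)] = "x" entry sets the slot for that ioctl nr, and the
 * bare strings that follow fill the consecutive slots after it.
 */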
static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
{
static const char *ioctl_tty_cmd[] = {
[_IOC_NR(TCGETS)] = "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
"TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL", "TIOCSCTTY",
"TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI", "TIOCGWINSZ", "TIOCSWINSZ",
"TIOCMGET", "TIOCMBIS", "TIOCMBIC", "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR",
"FIONREAD", "TIOCLINUX", "TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT",
"FIONBIO", "TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP",
[_IOC_NR(TIOCSBRK)] = "TIOCSBRK", "TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2",
"TCSETSW2", "TCSETSF2", "TIOCGRS48", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
"TIOCGDEV", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG", "TIOCVHANGUP", "TIOCGPKT",
"TIOCGPTLCK", [_IOC_NR(TIOCGEXCL)] = "TIOCGEXCL", "TIOCGPTPEER",
"TIOCGISO7816", "TIOCSISO7816",
[_IOC_NR(FIONCLEX)] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
"TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
"TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
"TIOCMIWAIT", "TIOCGICOUNT", };
static DEFINE_STRARRAY(ioctl_tty_cmd, "");
if (nr < strarray__ioctl_tty_cmd.nr_entries && strarray__ioctl_tty_cmd.entries[nr] != NULL)
return scnprintf(bf, size, "%s", strarray__ioctl_tty_cmd.entries[nr]);
return scnprintf(bf, size, "(%#x, %#x, %#x)", 'T', nr, dir);
}
static size_t ioctl__scnprintf_drm_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/drm_ioctl_array.c"
static DEFINE_STRARRAY(drm_ioctl_cmds, "");
if (nr < strarray__drm_ioctl_cmds.nr_entries && strarray__drm_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "DRM_%s", strarray__drm_ioctl_cmds.entries[nr]);
return scnprintf(bf, size, "(%#x, %#x, %#x)", 'd', nr, dir);
}
static size_t ioctl__scnprintf_sndrv_pcm_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/sndrv_pcm_ioctl_array.c"
static DEFINE_STRARRAY(sndrv_pcm_ioctl_cmds, "");
if (nr < strarray__sndrv_pcm_ioctl_cmds.nr_entries && strarray__sndrv_pcm_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "SNDRV_PCM_%s", strarray__sndrv_pcm_ioctl_cmds.entries[nr]);
return scnprintf(bf, size, "(%#x, %#x, %#x)", 'A', nr, dir);
}
static size_t ioctl__scnprintf_sndrv_ctl_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/sndrv_ctl_ioctl_array.c"
static DEFINE_STRARRAY(sndrv_ctl_ioctl_cmds, "");
if (nr < strarray__sndrv_ctl_ioctl_cmds.nr_entries && strarray__sndrv_ctl_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "SNDRV_CTL_%s", strarray__sndrv_ctl_ioctl_cmds.entries[nr]);
return scnprintf(bf, size, "(%#x, %#x, %#x)", 'U', nr, dir);
}
static size_t ioctl__scnprintf_kvm_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/kvm_ioctl_array.c"
static DEFINE_STRARRAY(kvm_ioctl_cmds, "");
if (nr < strarray__kvm_ioctl_cmds.nr_entries && strarray__kvm_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "KVM_%s", strarray__kvm_ioctl_cmds.entries[nr]);
return scnprintf(bf, size, "(%#x, %#x, %#x)", 0xAE, nr, dir);
}
static size_t ioctl__scnprintf_vhost_virtio_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/vhost_virtio_ioctl_array.c"
static DEFINE_STRARRAY(vhost_virtio_ioctl_cmds, "");
static DEFINE_STRARRAY(vhost_virtio_ioctl_read_cmds, "");
struct strarray *s = (dir & _IOC_READ) ? &strarray__vhost_virtio_ioctl_read_cmds : &strarray__vhost_virtio_ioctl_cmds;
if (nr < s->nr_entries && s->entries[nr] != NULL)
return scnprintf(bf, size, "VHOST_%s", s->entries[nr]);
return scnprintf(bf, size, "(%#x, %#x, %#x)", 0xAF, nr, dir);
}
static size_t ioctl__scnprintf_perf_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/perf_ioctl_array.c"
static DEFINE_STRARRAY(perf_ioctl_cmds, "");
if (nr < strarray__perf_ioctl_cmds.nr_entries && strarray__perf_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "PERF_%s", strarray__perf_ioctl_cmds.entries[nr]);
return scnprintf(bf, size, "(%#x, %#x, %#x)", 0xAE, nr, dir);
}
static size_t ioctl__scnprintf_usbdevfs_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/usbdevfs_ioctl_array.c"
static DEFINE_STRARRAY(usbdevfs_ioctl_cmds, "");
if (nr < strarray__usbdevfs_ioctl_cmds.nr_entries && strarray__usbdevfs_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "USBDEVFS_%s", strarray__usbdevfs_ioctl_cmds.entries[nr]);
return scnprintf(bf, size, "(%c, %#x, %#x)", 'U', nr, dir);
}
static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size, bool show_prefix)
{
const char *prefix = "_IOC_";
int dir = _IOC_DIR(cmd),
type = _IOC_TYPE(cmd),
nr = _IOC_NR(cmd),
sz = _IOC_SIZE(cmd);
int printed = 0;
static const struct ioctl_type {
int type;
size_t (*scnprintf)(int nr, int dir, char *bf, size_t size);
} ioctl_types[] = { /* Must be ordered by type */
{ .type = '$', .scnprintf = ioctl__scnprintf_perf_cmd, },
['A' - '$'] = { .type = 'A', .scnprintf = ioctl__scnprintf_sndrv_pcm_cmd, },
['T' - '$'] = { .type = 'T', .scnprintf = ioctl__scnprintf_tty_cmd, },
['U' - '$'] = { .type = 'U', .scnprintf = ioctl__scnprintf_sndrv_ctl_cmd, },
['d' - '$'] = { .type = 'd', .scnprintf = ioctl__scnprintf_drm_cmd, },
[0xAE - '$'] = { .type = 0xAE, .scnprintf = ioctl__scnprintf_kvm_cmd, },
[0xAF - '$'] = { .type = 0xAF, .scnprintf = ioctl__scnprintf_vhost_virtio_cmd, },
};
const int nr_types = ARRAY_SIZE(ioctl_types);
if (type >= ioctl_types[0].type && type <= ioctl_types[nr_types - 1].type) {
const int index = type - ioctl_types[0].type;
if (ioctl_types[index].scnprintf != NULL)
return ioctl_types[index].scnprintf(nr, dir, bf, size);
}
printed += scnprintf(bf + printed, size - printed, "%c", '(');
if (dir == _IOC_NONE) {
printed += scnprintf(bf + printed, size - printed, "%s%s", show_prefix ? prefix : "", "NONE");
} else {
if (dir & _IOC_READ)
printed += scnprintf(bf + printed, size - printed, "%s%s", show_prefix ? prefix : "", "READ");
if (dir & _IOC_WRITE) {
printed += scnprintf(bf + printed, size - printed, "%s%s%s", dir & _IOC_READ ? "|" : "",
show_prefix ? prefix : "", "WRITE");
}
}
return printed + scnprintf(bf + printed, size - printed, ", %#x, %#x, %#x)", type, nr, sz);
}
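/*
 * Worked example (illustrative sketch): an ioctl cmd packs dir/type/nr/size
 * into one word. TCGETS == 0x5401, so _IOC_TYPE(cmd) == 0x54 == 'T' and
 * _IOC_NR(cmd) == 0x01, which selects ioctl__scnprintf_tty_cmd() above and
 * prints "TCGETS". A type without a beautifier falls through to the generic
 * decoder, e.g. "(NONE, 0x7a, 0x1, 0)" for a hypothetical type 'z' command.
 */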
#ifndef USB_DEVICE_MAJOR
#define USB_DEVICE_MAJOR 189
#endif // USB_DEVICE_MAJOR
size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long cmd = arg->val;
int fd = syscall_arg__val(arg, 0);
struct file *file = thread__files_entry(arg->thread, fd);
if (file != NULL) {
if (file->dev_maj == USB_DEVICE_MAJOR)
return ioctl__scnprintf_usbdevfs_cmd(_IOC_NR(cmd), _IOC_DIR(cmd), bf, size);
}
return ioctl__scnprintf_cmd(cmd, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/ioctl.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/arch_prctl.c
*
* Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/kernel.h>
#include "trace/beauty/generated/x86_arch_prctl_code_array.c"
static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_1, "ARCH_", x86_arch_prctl_codes_1_offset);
static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_2, "ARCH_", x86_arch_prctl_codes_2_offset);
static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_3, "ARCH_", x86_arch_prctl_codes_3_offset);
static struct strarray *x86_arch_prctl_codes[] = {
&strarray__x86_arch_prctl_codes_1,
&strarray__x86_arch_prctl_codes_2,
&strarray__x86_arch_prctl_codes_3,
};
static DEFINE_STRARRAYS(x86_arch_prctl_codes);
static size_t x86_arch_prctl__scnprintf_code(int option, char *bf, size_t size, bool show_prefix)
{
return strarrays__scnprintf(&strarrays__x86_arch_prctl_codes, bf, size, "%#x", show_prefix, option);
}
size_t syscall_arg__scnprintf_x86_arch_prctl_code(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long code = arg->val;
return x86_arch_prctl__scnprintf_code(code, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/arch_prctl.c |
// SPDX-License-Identifier: LGPL-2.1
#include <linux/futex.h>
#ifndef FUTEX_WAIT_BITSET
#define FUTEX_WAIT_BITSET 9
#endif
#ifndef FUTEX_WAKE_BITSET
#define FUTEX_WAKE_BITSET 10
#endif
#ifndef FUTEX_WAIT_REQUEUE_PI
#define FUTEX_WAIT_REQUEUE_PI 11
#endif
#ifndef FUTEX_CMP_REQUEUE_PI
#define FUTEX_CMP_REQUEUE_PI 12
#endif
#ifndef FUTEX_CLOCK_REALTIME
#define FUTEX_CLOCK_REALTIME 256
#endif
static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "FUTEX_";
enum syscall_futex_args {
SCF_UADDR = (1 << 0),
SCF_OP = (1 << 1),
SCF_VAL = (1 << 2),
SCF_TIMEOUT = (1 << 3),
SCF_UADDR2 = (1 << 4),
SCF_VAL3 = (1 << 5),
};
int op = arg->val;
int cmd = op & FUTEX_CMD_MASK;
size_t printed = 0;
switch (cmd) {
#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n);
P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
P_FUTEX_OP(REQUEUE); arg->mask |= SCF_VAL3|SCF_TIMEOUT; break;
P_FUTEX_OP(CMP_REQUEUE); arg->mask |= SCF_TIMEOUT; break;
P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT; break;
P_FUTEX_OP(WAKE_OP); break;
P_FUTEX_OP(LOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
P_FUTEX_OP(UNLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
P_FUTEX_OP(TRYLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
P_FUTEX_OP(WAIT_BITSET); arg->mask |= SCF_UADDR2; break;
P_FUTEX_OP(WAKE_BITSET); arg->mask |= SCF_UADDR2; break;
P_FUTEX_OP(WAIT_REQUEUE_PI); break;
default: printed = scnprintf(bf, size, "%#x", cmd); break;
}
if (op & FUTEX_PRIVATE_FLAG)
printed += scnprintf(bf + printed, size - printed, "|%s%s", show_prefix ? prefix : "", "PRIVATE_FLAG");
if (op & FUTEX_CLOCK_REALTIME)
printed += scnprintf(bf + printed, size - printed, "|%s%s", show_prefix ? prefix : "", "CLOCK_REALTIME");
return printed;
}
#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
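/*
 * Worked example (illustrative sketch): the op word is cmd | flags, so
 * FUTEX_WAIT_PRIVATE == FUTEX_WAIT | FUTEX_PRIVATE_FLAG == 0 | 128. For
 * op == 128: cmd == (op & FUTEX_CMD_MASK) == FUTEX_WAIT, and the function
 * above prints "WAIT|PRIVATE_FLAG" ("FUTEX_WAIT|FUTEX_PRIVATE_FLAG" when
 * show_prefix is set).
 */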
| linux-master | tools/perf/trace/beauty/futex_op.c |
// SPDX-License-Identifier: LGPL-2.1
// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
#include "trace/beauty/beauty.h"
static size_t renameat2__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/rename_flags_array.c"
static DEFINE_STRARRAY(rename_flags, "RENAME_");
return strarray__scnprintf_flags(&strarray__rename_flags, bf, size, show_prefix, flags);
}
size_t syscall_arg__scnprintf_renameat2_flags(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long flags = arg->val;
return renameat2__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/renameat.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/statx.c
*
* Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/kernel.h>
#include <sys/types.h>
#include <uapi/linux/fcntl.h>
#include <uapi/linux/stat.h>
size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "AT_";
int printed = 0, flags = arg->val;
if (flags == 0)
return scnprintf(bf, size, "%s%s", show_prefix ? "AT_STATX_" : "", "SYNC_AS_STAT");
#define P_FLAG(n) \
if (flags & AT_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~AT_##n; \
}
P_FLAG(SYMLINK_NOFOLLOW);
P_FLAG(REMOVEDIR);
P_FLAG(SYMLINK_FOLLOW);
P_FLAG(NO_AUTOMOUNT);
P_FLAG(EMPTY_PATH);
P_FLAG(STATX_FORCE_SYNC);
P_FLAG(STATX_DONT_SYNC);
#undef P_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
return printed;
}
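/*
 * Example of the P_FLAG pattern above (illustrative sketch): each known bit
 * is printed and cleared, and whatever is left over is dumped as hex. With
 * flags == AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH | 0x80000000 the output is
 * "SYMLINK_NOFOLLOW|EMPTY_PATH|0x80000000" (each known name gets the "AT_"
 * prefix when show_prefix is set).
 */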
size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "STATX_";
int printed = 0, flags = arg->val;
#define P_FLAG(n) \
if (flags & STATX_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~STATX_##n; \
}
P_FLAG(TYPE);
P_FLAG(MODE);
P_FLAG(NLINK);
P_FLAG(UID);
P_FLAG(GID);
P_FLAG(ATIME);
P_FLAG(MTIME);
P_FLAG(CTIME);
P_FLAG(INO);
P_FLAG(SIZE);
P_FLAG(BLOCKS);
P_FLAG(BTIME);
P_FLAG(MNT_ID);
P_FLAG(DIOALIGN);
#undef P_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
return printed;
}
| linux-master | tools/perf/trace/beauty/statx.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/fsmount.c
*
* Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/log2.h>
#include <uapi/linux/mount.h>
static size_t fsmount__scnprintf_attr_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/fsmount_arrays.c"
static DEFINE_STRARRAY(fsmount_attr_flags, "MOUNT_ATTR_");
size_t printed = 0;
if ((flags & ~MOUNT_ATTR__ATIME) != 0)
printed += strarray__scnprintf_flags(&strarray__fsmount_attr_flags, bf, size, show_prefix, flags);
if ((flags & MOUNT_ATTR__ATIME) == MOUNT_ATTR_RELATIME) {
printed += scnprintf(bf + printed, size - printed, "%s%s%s",
printed ? "|" : "", show_prefix ? "MOUNT_ATTR_" : "", "RELATIME");
}
return printed;
}
size_t syscall_arg__scnprintf_fsmount_attr_flags(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long flags = arg->val;
return fsmount__scnprintf_attr_flags(flags, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/fsmount.c |
// SPDX-License-Identifier: LGPL-2.1
#include <linux/futex.h>
#ifndef FUTEX_BITSET_MATCH_ANY
#define FUTEX_BITSET_MATCH_ANY 0xffffffff
#endif
static size_t syscall_arg__scnprintf_futex_val3(char *bf, size_t size, struct syscall_arg *arg)
{
const char *prefix = "FUTEX_BITSET_";
unsigned int bitset = arg->val;
if (bitset == FUTEX_BITSET_MATCH_ANY)
return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "MATCH_ANY");
return scnprintf(bf, size, "%#xd", bitset);
}
#define SCA_FUTEX_VAL3 syscall_arg__scnprintf_futex_val3
| linux-master | tools/perf/trace/beauty/futex_val3.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/mount_flags.c
*
* Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <sys/mount.h>
static size_t mount__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/mount_flags_array.c"
static DEFINE_STRARRAY(mount_flags, "MS_");
return strarray__scnprintf_flags(&strarray__mount_flags, bf, size, show_prefix, flags);
}
unsigned long syscall_arg__mask_val_mount_flags(struct syscall_arg *arg __maybe_unused, unsigned long flags)
{
// do_mount in fs/namespace.c:
/*
* Pre-0.97 versions of mount() didn't have a flags word. When the
* flags word was introduced its top half was required to have the
* magic value 0xC0ED, and this remained so until 2.4.0-test9.
* Therefore, if this magic number is present, it carries no
* information and must be discarded.
*/
if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
flags &= ~MS_MGC_MSK;
return flags;
}
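/*
 * Worked example for the magic-number stripping above (illustrative sketch):
 * an old libc may pass flags == 0xc0ed0001, i.e. MS_MGC_VAL | MS_RDONLY.
 * Since (flags & MS_MGC_MSK) == MS_MGC_VAL, the top half is discarded and
 * only MS_RDONLY (1) survives to be beautified as "RDONLY".
 */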
size_t syscall_arg__scnprintf_mount_flags(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long flags = arg->val;
return mount__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/mount_flags.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/fcntl.c
*
* Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/kernel.h>
#include <uapi/linux/fcntl.h>
static size_t fcntl__scnprintf_getfd(unsigned long val, char *bf, size_t size, bool show_prefix)
{
	return val ? scnprintf(bf, size, "%s%s", show_prefix ? "FD_" : "", "CLOEXEC") :
		     scnprintf(bf, size, "%s", "0");
}
static size_t syscall_arg__scnprintf_fcntl_getfd(char *bf, size_t size, struct syscall_arg *arg)
{
return fcntl__scnprintf_getfd(arg->val, bf, size, arg->show_string_prefix);
}
static size_t fcntl__scnprintf_getlease(unsigned long val, char *bf, size_t size, bool show_prefix)
{
static const char *fcntl_setlease[] = { "RDLCK", "WRLCK", "UNLCK", };
static DEFINE_STRARRAY(fcntl_setlease, "F_");
return strarray__scnprintf(&strarray__fcntl_setlease, bf, size, "%x", show_prefix, val);
}
static size_t syscall_arg__scnprintf_fcntl_getlease(char *bf, size_t size, struct syscall_arg *arg)
{
return fcntl__scnprintf_getlease(arg->val, bf, size, arg->show_string_prefix);
}
size_t syscall_arg__scnprintf_fcntl_cmd(char *bf, size_t size, struct syscall_arg *arg)
{
if (arg->val == F_GETFL) {
syscall_arg__set_ret_scnprintf(arg, syscall_arg__scnprintf_open_flags);
goto mask_arg;
}
if (arg->val == F_GETFD) {
syscall_arg__set_ret_scnprintf(arg, syscall_arg__scnprintf_fcntl_getfd);
goto mask_arg;
}
if (arg->val == F_DUPFD_CLOEXEC || arg->val == F_DUPFD) {
syscall_arg__set_ret_scnprintf(arg, syscall_arg__scnprintf_fd);
goto out;
}
if (arg->val == F_GETOWN) {
syscall_arg__set_ret_scnprintf(arg, syscall_arg__scnprintf_pid);
goto mask_arg;
}
if (arg->val == F_GETLEASE) {
syscall_arg__set_ret_scnprintf(arg, syscall_arg__scnprintf_fcntl_getlease);
goto mask_arg;
}
/*
* Some commands ignore the third fcntl argument, "arg", so mask it
*/
if (arg->val == F_GET_SEALS ||
arg->val == F_GETSIG) {
mask_arg:
arg->mask |= (1 << 2);
}
out:
return syscall_arg__scnprintf_strarrays(bf, size, arg);
}
size_t syscall_arg__scnprintf_fcntl_arg(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
int cmd = syscall_arg__val(arg, 1);
if (cmd == F_DUPFD)
return syscall_arg__scnprintf_fd(bf, size, arg);
if (cmd == F_SETFD)
return fcntl__scnprintf_getfd(arg->val, bf, size, show_prefix);
if (cmd == F_SETFL)
return open__scnprintf_flags(arg->val, bf, size, show_prefix);
if (cmd == F_SETOWN)
return syscall_arg__scnprintf_pid(bf, size, arg);
if (cmd == F_SETLEASE)
return fcntl__scnprintf_getlease(arg->val, bf, size, show_prefix);
/*
* We still don't grab the contents of pointers on entry or exit,
* so just print them as hex numbers
*/
if (cmd == F_SETLK || cmd == F_SETLKW || cmd == F_GETLK ||
cmd == F_OFD_SETLK || cmd == F_OFD_SETLKW || cmd == F_OFD_GETLK ||
cmd == F_GETOWN_EX || cmd == F_SETOWN_EX ||
cmd == F_GET_RW_HINT || cmd == F_SET_RW_HINT ||
cmd == F_GET_FILE_RW_HINT || cmd == F_SET_FILE_RW_HINT)
return syscall_arg__scnprintf_hex(bf, size, arg);
return syscall_arg__scnprintf_long(bf, size, arg);
}
| linux-master | tools/perf/trace/beauty/fcntl.c |
// SPDX-License-Identifier: LGPL-2.1
#ifndef EFD_SEMAPHORE
#define EFD_SEMAPHORE 1
#endif
#ifndef EFD_NONBLOCK
#define EFD_NONBLOCK 00004000
#endif
#ifndef EFD_CLOEXEC
#define EFD_CLOEXEC 02000000
#endif
static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size, struct syscall_arg *arg)
{
bool show_prefix = arg->show_string_prefix;
const char *prefix = "EFD_";
int printed = 0, flags = arg->val;
if (flags == 0)
return scnprintf(bf, size, "NONE");
#define P_FLAG(n) \
if (flags & EFD_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~EFD_##n; \
}
P_FLAG(SEMAPHORE);
P_FLAG(CLOEXEC);
P_FLAG(NONBLOCK);
#undef P_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
return printed;
}
#define SCA_EFD_FLAGS syscall_arg__scnprintf_eventfd_flags
| linux-master | tools/perf/trace/beauty/eventfd.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/cone.c
*
* Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/kernel.h>
#include <sys/types.h>
#include <uapi/linux/sched.h>
static size_t clone__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
const char *prefix = "CLONE_";
int printed = 0;
#define P_FLAG(n) \
if (flags & CLONE_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~CLONE_##n; \
}
P_FLAG(VM);
P_FLAG(FS);
P_FLAG(FILES);
P_FLAG(SIGHAND);
P_FLAG(PIDFD);
P_FLAG(PTRACE);
P_FLAG(VFORK);
P_FLAG(PARENT);
P_FLAG(THREAD);
P_FLAG(NEWNS);
P_FLAG(SYSVSEM);
P_FLAG(SETTLS);
P_FLAG(PARENT_SETTID);
P_FLAG(CHILD_CLEARTID);
P_FLAG(DETACHED);
P_FLAG(UNTRACED);
P_FLAG(CHILD_SETTID);
P_FLAG(NEWCGROUP);
P_FLAG(NEWUTS);
P_FLAG(NEWIPC);
P_FLAG(NEWUSER);
P_FLAG(NEWPID);
P_FLAG(NEWNET);
P_FLAG(IO);
P_FLAG(CLEAR_SIGHAND);
P_FLAG(INTO_CGROUP);
#undef P_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
return printed;
}
size_t syscall_arg__scnprintf_clone_flags(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long flags = arg->val;
enum syscall_clone_args {
SCC_FLAGS = (1 << 0),
SCC_CHILD_STACK = (1 << 1),
SCC_PARENT_TIDPTR = (1 << 2),
SCC_CHILD_TIDPTR = (1 << 3),
SCC_TLS = (1 << 4),
};
if (!(flags & CLONE_PARENT_SETTID))
arg->mask |= SCC_PARENT_TIDPTR;
if (!(flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)))
arg->mask |= SCC_CHILD_TIDPTR;
if (!(flags & CLONE_SETTLS))
arg->mask |= SCC_TLS;
return clone__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/clone.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/sync_file_range.c
*
* Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include <linux/log2.h>
#include <uapi/linux/fs.h>
static size_t sync_file_range__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/sync_file_range_arrays.c"
static DEFINE_STRARRAY(sync_file_range_flags, "SYNC_FILE_RANGE_");
size_t printed = 0;
if ((flags & SYNC_FILE_RANGE_WRITE_AND_WAIT) == SYNC_FILE_RANGE_WRITE_AND_WAIT) {
printed += scnprintf(bf + printed, size - printed, "%s%s", show_prefix ? "SYNC_FILE_RANGE_" : "", "WRITE_AND_WAIT");
flags &= ~SYNC_FILE_RANGE_WRITE_AND_WAIT;
}
return printed + strarray__scnprintf_flags(&strarray__sync_file_range_flags, bf + printed, size - printed, show_prefix, flags);
}
size_t syscall_arg__scnprintf_sync_file_range_flags(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long flags = arg->val;
return sync_file_range__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
| linux-master | tools/perf/trace/beauty/sync_file_range.c |
// SPDX-License-Identifier: LGPL-2.1
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#ifndef O_DIRECT
#define O_DIRECT 00040000
#endif
#ifndef O_DIRECTORY
#define O_DIRECTORY 00200000
#endif
#ifndef O_NOATIME
#define O_NOATIME 01000000
#endif
#ifndef O_TMPFILE
#define O_TMPFILE 020000000
#endif
#undef O_LARGEFILE
#define O_LARGEFILE 00100000
size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
const char *prefix = "O_";
int printed = 0;
if ((flags & O_ACCMODE) == O_RDONLY)
printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", "RDONLY");
if (flags == 0)
return printed;
#define P_FLAG(n) \
if (flags & O_##n) { \
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~O_##n; \
}
P_FLAG(RDWR);
P_FLAG(APPEND);
P_FLAG(ASYNC);
P_FLAG(CLOEXEC);
P_FLAG(CREAT);
P_FLAG(DIRECT);
P_FLAG(DIRECTORY);
P_FLAG(EXCL);
P_FLAG(LARGEFILE);
P_FLAG(NOFOLLOW);
P_FLAG(TMPFILE);
P_FLAG(NOATIME);
P_FLAG(NOCTTY);
#ifdef O_NONBLOCK
P_FLAG(NONBLOCK);
#elif O_NDELAY
P_FLAG(NDELAY);
#endif
#ifdef O_PATH
P_FLAG(PATH);
#endif
#ifdef O_DSYNC
if ((flags & O_SYNC) == O_SYNC)
printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", "SYNC");
else {
P_FLAG(DSYNC);
}
#else
P_FLAG(SYNC);
#endif
P_FLAG(TRUNC);
P_FLAG(WRONLY);
#undef P_FLAG
if (flags)
printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
return printed;
}
size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size, struct syscall_arg *arg)
{
int flags = arg->val;
if (!(flags & O_CREAT))
arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */
return open__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
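/*
 * Note on the O_CREAT masking above (illustrative sketch): open(2)'s third
 * "mode" argument is only meaningful together with O_CREAT, so when O_CREAT
 * is absent the following argument slot is masked out of the output:
 *
 *	open("/tmp/f", O_RDONLY)                -> no mode printed
 *	open("/tmp/f", O_CREAT|O_WRONLY, 0644)  -> mode printed as usual
 */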
| linux-master | tools/perf/trace/beauty/open_flags.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/x86_irq_vectors.c
*
* Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include "trace/beauty/generated/x86_arch_irq_vectors_array.c"
static DEFINE_STRARRAY(x86_irq_vectors, "_VECTOR");
static size_t x86_irq_vectors__scnprintf(unsigned long vector, char *bf, size_t size, bool show_prefix)
{
return strarray__scnprintf_suffix(&strarray__x86_irq_vectors, bf, size, "%#x", show_prefix, vector);
}
size_t syscall_arg__scnprintf_x86_irq_vectors(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long vector = arg->val;
return x86_irq_vectors__scnprintf(vector, bf, size, arg->show_string_prefix);
}
bool syscall_arg__strtoul_x86_irq_vectors(char *bf, size_t size, struct syscall_arg *arg __maybe_unused, u64 *ret)
{
return strarray__strtoul(&strarray__x86_irq_vectors, bf, size, ret);
}
| linux-master | tools/perf/trace/beauty/tracepoints/x86_irq_vectors.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* trace/beauty/x86_msr.c
*
* Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*/
#include "trace/beauty/beauty.h"
#include "trace/beauty/generated/x86_arch_MSRs_array.c"
static DEFINE_STRARRAY(x86_MSRs, "MSR_");
static DEFINE_STRARRAY_OFFSET(x86_64_specific_MSRs, "MSR_", x86_64_specific_MSRs_offset);
static DEFINE_STRARRAY_OFFSET(x86_AMD_V_KVM_MSRs, "MSR_", x86_AMD_V_KVM_MSRs_offset);
static struct strarray *x86_MSRs_tables[] = {
&strarray__x86_MSRs,
&strarray__x86_64_specific_MSRs,
&strarray__x86_AMD_V_KVM_MSRs,
};
static DEFINE_STRARRAYS(x86_MSRs_tables);
static size_t x86_MSR__scnprintf(unsigned long msr, char *bf, size_t size, bool show_prefix)
{
return strarrays__scnprintf(&strarrays__x86_MSRs_tables, bf, size, "%#x", show_prefix, msr);
}
size_t syscall_arg__scnprintf_x86_MSR(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long flags = arg->val;
return x86_MSR__scnprintf(flags, bf, size, arg->show_string_prefix);
}
bool syscall_arg__strtoul_x86_MSR(char *bf, size_t size, struct syscall_arg *arg __maybe_unused, u64 *ret)
{
return strarrays__strtoul(&strarrays__x86_MSRs_tables, bf, size, ret);
}
| linux-master | tools/perf/trace/beauty/tracepoints/x86_msr.c |
#include <stdio.h>
#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include "spark.h"
#include "stat.h"
#define SPARK_SHIFT 8
/* Print spark lines into bf for numval values in val. */
int print_spark(char *bf, int size, unsigned long *val, int numval)
{
static const char *ticks[NUM_SPARKS] = {
"▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"
};
int i, printed = 0;
unsigned long min = ULONG_MAX, max = 0, f;
for (i = 0; i < numval; i++) {
if (val[i] < min)
min = val[i];
if (val[i] > max)
max = val[i];
}
f = ((max - min) << SPARK_SHIFT) / (NUM_SPARKS - 1);
if (f < 1)
f = 1;
for (i = 0; i < numval; i++) {
printed += scnprintf(bf + printed, size - printed, "%s",
ticks[((val[i] - min) << SPARK_SHIFT) / f]);
}
return printed;
}
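/*
 * Worked example of the fixed-point scaling above (illustrative sketch,
 * assuming NUM_SPARKS == 8 as implied by the ticks[] array): SPARK_SHIFT
 * gives 8 fractional bits. For val[] = {10, 20, 30}: min = 10, max = 30, so
 * f = ((30 - 10) << 8) / 7 = 731 and the tick indices come out as
 * (0 << 8) / 731 = 0, (10 << 8) / 731 = 3 and (20 << 8) / 731 = 7, i.e. the
 * lowest, a middle and the highest glyph.
 */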
| linux-master | tools/perf/util/spark.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <ctype.h>
#include <dirent.h>
#include <pthread.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
#include "pmu.h"
#include "print-events.h"
/*
 * core_pmus: A PMU belongs to core_pmus if its name is "cpu" or its sysfs
 *            directory contains a "cpus" file. All PMUs belonging to core_pmus
 *            must have pmu->is_core=1. If there is more than one PMU in
 *            this list, perf interprets it as a heterogeneous platform.
 *            (FWIW, certain ARM platforms with heterogeneous cores use a
 *            homogeneous PMU, and thus they are treated as a homogeneous
 *            platform by perf because core_pmus will have only one entry.)
 * other_pmus: All other PMUs which are not part of the core_pmus list. It
 *            doesn't matter whether a PMU is present per SMT thread or
 *            outside of the core in the hw. E.g., an instance of the AMD
 *            ibs_fetch// and ibs_op// PMUs is present in each hw SMT thread,
 *            however they are captured under other_pmus. PMUs belonging to
 *            other_pmus must have pmu->is_core=0, but pmu->is_uncore could be 0 or 1.
*/
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;
int pmu_name_len_no_suffix(const char *str, unsigned long *num)
{
int orig_len, len;
orig_len = len = strlen(str);
/* Non-uncore PMUs have their full length, for example, i915. */
if (!strstarts(str, "uncore_"))
return len;
/*
 * Count trailing digits and '_'; if a '_{num}' suffix isn't present, use
 * the full length.
*/
while (len > 0 && isdigit(str[len - 1]))
len--;
if (len > 0 && len != orig_len && str[len - 1] == '_') {
if (num)
*num = strtoul(&str[len], NULL, 10);
return len - 1;
}
return orig_len;
}
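/*
 * Examples (illustrative sketch) - suffix stripping only applies to uncore
 * PMUs:
 *
 *	pmu_name_len_no_suffix("i915", NULL)          == 4   ("i915")
 *	pmu_name_len_no_suffix("uncore_cbox_2", &num) == 11  ("uncore_cbox", num = 2)
 *	pmu_name_len_no_suffix("uncore_imc", NULL)    == 10  (no '_{num}' suffix)
 */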
void perf_pmus__destroy(void)
{
struct perf_pmu *pmu, *tmp;
list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
list_del(&pmu->list);
perf_pmu__delete(pmu);
}
list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
list_del(&pmu->list);
perf_pmu__delete(pmu);
}
read_sysfs_core_pmus = false;
read_sysfs_all_pmus = false;
}
static struct perf_pmu *pmu_find(const char *name)
{
struct perf_pmu *pmu;
list_for_each_entry(pmu, &core_pmus, list) {
if (!strcmp(pmu->name, name) ||
(pmu->alias_name && !strcmp(pmu->alias_name, name)))
return pmu;
}
list_for_each_entry(pmu, &other_pmus, list) {
if (!strcmp(pmu->name, name) ||
(pmu->alias_name && !strcmp(pmu->alias_name, name)))
return pmu;
}
return NULL;
}
struct perf_pmu *perf_pmus__find(const char *name)
{
struct perf_pmu *pmu;
int dirfd;
bool core_pmu;
/*
	 * Once a PMU is loaded it stays in the list,
	 * which saves us from repeatedly reading/parsing
	 * the PMU format definitions.
*/
pmu = pmu_find(name);
if (pmu)
return pmu;
if (read_sysfs_all_pmus)
return NULL;
core_pmu = is_pmu_core(name);
if (core_pmu && read_sysfs_core_pmus)
return NULL;
dirfd = perf_pmu__event_source_devices_fd();
pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
close(dirfd);
return pmu;
}
static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
{
struct perf_pmu *pmu;
bool core_pmu;
/*
	 * Once a PMU is loaded it stays in the list,
	 * which saves us from repeatedly reading/parsing
	 * the PMU format definitions.
*/
pmu = pmu_find(name);
if (pmu)
return pmu;
if (read_sysfs_all_pmus)
return NULL;
core_pmu = is_pmu_core(name);
if (core_pmu && read_sysfs_core_pmus)
return NULL;
return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
}
static int pmus_cmp(void *priv __maybe_unused,
const struct list_head *lhs, const struct list_head *rhs)
{
unsigned long lhs_num = 0, rhs_num = 0;
struct perf_pmu *lhs_pmu = container_of(lhs, struct perf_pmu, list);
struct perf_pmu *rhs_pmu = container_of(rhs, struct perf_pmu, list);
const char *lhs_pmu_name = lhs_pmu->name ?: "";
const char *rhs_pmu_name = rhs_pmu->name ?: "";
int lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name, &lhs_num);
int rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name, &rhs_num);
int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
lhs_pmu_name_len < rhs_pmu_name_len ? lhs_pmu_name_len : rhs_pmu_name_len);
if (lhs_pmu_name_len != rhs_pmu_name_len || ret != 0 || lhs_pmu_name_len == 0)
return ret;
return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
}
/* Add all pmus in sysfs to pmu list: */
static void pmu_read_sysfs(bool core_only)
{
int fd;
DIR *dir;
struct dirent *dent;
if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
return;
fd = perf_pmu__event_source_devices_fd();
if (fd < 0)
return;
dir = fdopendir(fd);
if (!dir) {
close(fd);
return;
}
while ((dent = readdir(dir))) {
if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
continue;
if (core_only && !is_pmu_core(dent->d_name))
continue;
/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
perf_pmu__find2(fd, dent->d_name);
}
closedir(dir);
if (list_empty(&core_pmus)) {
if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
pr_err("Failure to set up any core PMUs\n");
}
list_sort(NULL, &core_pmus, pmus_cmp);
list_sort(NULL, &other_pmus, pmus_cmp);
if (!list_empty(&core_pmus)) {
read_sysfs_core_pmus = true;
if (!core_only)
read_sysfs_all_pmus = true;
}
}
static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
{
struct perf_pmu *pmu;
list_for_each_entry(pmu, &core_pmus, list) {
if (pmu->type == type)
return pmu;
}
list_for_each_entry(pmu, &other_pmus, list) {
if (pmu->type == type)
return pmu;
}
return NULL;
}
struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
{
struct perf_pmu *pmu = __perf_pmus__find_by_type(type);
if (pmu || read_sysfs_all_pmus)
return pmu;
pmu_read_sysfs(/*core_only=*/false);
pmu = __perf_pmus__find_by_type(type);
return pmu;
}
/*
 * PMU iterator: if pmu is NULL, we start at the beginning, otherwise we
 * return the next PMU. Returns NULL when there are no more PMUs.
*/
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
bool use_core_pmus = !pmu || pmu->is_core;
if (!pmu) {
pmu_read_sysfs(/*core_only=*/false);
pmu = list_prepare_entry(pmu, &core_pmus, list);
}
if (use_core_pmus) {
list_for_each_entry_continue(pmu, &core_pmus, list)
return pmu;
pmu = NULL;
pmu = list_prepare_entry(pmu, &other_pmus, list);
}
list_for_each_entry_continue(pmu, &other_pmus, list)
return pmu;
return NULL;
}
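/*
 * Usage sketch: the iterator is meant to be driven to completion, e.g.
 *
 *	struct perf_pmu *pmu = NULL;
 *
 *	while ((pmu = perf_pmus__scan(pmu)) != NULL)
 *		pr_debug("%s\n", pmu->name);
 *
 * as perf_pmus__pmu_for_pmu_filter() does below.
 */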
struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
if (!pmu) {
pmu_read_sysfs(/*core_only=*/true);
pmu = list_prepare_entry(pmu, &core_pmus, list);
}
list_for_each_entry_continue(pmu, &core_pmus, list)
return pmu;
return NULL;
}
static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
{
bool use_core_pmus = !pmu || pmu->is_core;
int last_pmu_name_len = 0;
const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";
if (!pmu) {
pmu_read_sysfs(/*core_only=*/false);
pmu = list_prepare_entry(pmu, &core_pmus, list);
} else
last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", NULL);
if (use_core_pmus) {
list_for_each_entry_continue(pmu, &core_pmus, list) {
int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL);
if (last_pmu_name_len == pmu_name_len &&
!strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
continue;
return pmu;
}
pmu = NULL;
pmu = list_prepare_entry(pmu, &other_pmus, list);
}
list_for_each_entry_continue(pmu, &other_pmus, list) {
int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL);
if (last_pmu_name_len == pmu_name_len &&
!strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
continue;
return pmu;
}
return NULL;
}
const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
{
struct perf_pmu *pmu = NULL;
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
if (!strcmp(pmu->name, str))
return pmu;
/* Ignore "uncore_" prefix. */
if (!strncmp(pmu->name, "uncore_", 7)) {
if (!strcmp(pmu->name + 7, str))
return pmu;
}
/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
if (!strncmp(pmu->name, "cpu_", 4)) {
if (!strcmp(pmu->name + 4, str))
return pmu;
}
}
return NULL;
}
int __weak perf_pmus__num_mem_pmus(void)
{
/* All core PMUs are for mem events. */
return perf_pmus__num_core_pmus();
}
/** Struct for ordering events as output in perf list. */
struct sevent {
/** PMU for event. */
const struct perf_pmu *pmu;
const char *name;
	const char *alias;
const char *scale_unit;
const char *desc;
const char *long_desc;
const char *encoding_desc;
const char *topic;
const char *pmu_name;
bool deprecated;
};
static int cmp_sevent(const void *a, const void *b)
{
const struct sevent *as = a;
const struct sevent *bs = b;
bool a_iscpu, b_iscpu;
int ret;
/* Put extra events last. */
if (!!as->desc != !!bs->desc)
return !!as->desc - !!bs->desc;
/* Order by topics. */
ret = strcmp(as->topic ?: "", bs->topic ?: "");
if (ret)
return ret;
/* Order CPU core events to be first */
a_iscpu = as->pmu ? as->pmu->is_core : true;
b_iscpu = bs->pmu ? bs->pmu->is_core : true;
if (a_iscpu != b_iscpu)
return a_iscpu ? -1 : 1;
/* Order by PMU name. */
if (as->pmu != bs->pmu) {
ret = strcmp(as->pmu_name ?: "", bs->pmu_name ?: "");
if (ret)
return ret;
}
/* Order by event name. */
return strcmp(as->name, bs->name);
}
static bool pmu_alias_is_duplicate(struct sevent *a, struct sevent *b)
{
/* Different names -> never duplicates */
if (strcmp(a->name ?: "//", b->name ?: "//"))
return false;
/* Don't remove duplicates for different PMUs */
	return strcmp(a->pmu_name ?: "", b->pmu_name ?: "") == 0; /* pmu_name may be NULL */
}
struct events_callback_state {
struct sevent *aliases;
size_t aliases_len;
size_t index;
};
static int perf_pmus__print_pmu_events__callback(void *vstate,
struct pmu_event_info *info)
{
struct events_callback_state *state = vstate;
struct sevent *s;
if (state->index >= state->aliases_len) {
pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name);
return 1;
}
s = &state->aliases[state->index];
s->pmu = info->pmu;
#define COPY_STR(str) s->str = info->str ? strdup(info->str) : NULL
COPY_STR(name);
COPY_STR(alias);
COPY_STR(scale_unit);
COPY_STR(desc);
COPY_STR(long_desc);
COPY_STR(encoding_desc);
COPY_STR(topic);
COPY_STR(pmu_name);
#undef COPY_STR
s->deprecated = info->deprecated;
state->index++;
return 0;
}
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
struct perf_pmu *pmu;
int printed = 0;
int len;
struct sevent *aliases;
struct events_callback_state state;
bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
struct perf_pmu *(*scan_fn)(struct perf_pmu *);
if (skip_duplicate_pmus)
scan_fn = perf_pmus__scan_skip_duplicates;
else
scan_fn = perf_pmus__scan;
pmu = NULL;
len = 0;
while ((pmu = scan_fn(pmu)) != NULL)
len += perf_pmu__num_events(pmu);
aliases = zalloc(sizeof(struct sevent) * len);
if (!aliases) {
pr_err("FATAL: not enough memory to print PMU events\n");
return;
}
pmu = NULL;
state = (struct events_callback_state) {
.aliases = aliases,
.aliases_len = len,
.index = 0,
};
while ((pmu = scan_fn(pmu)) != NULL) {
perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state,
perf_pmus__print_pmu_events__callback);
}
qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
for (int j = 0; j < len; j++) {
/* Skip duplicates */
if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
continue;
print_cb->print_event(print_state,
aliases[j].pmu_name,
aliases[j].topic,
aliases[j].name,
aliases[j].alias,
aliases[j].scale_unit,
aliases[j].deprecated,
"Kernel PMU event",
aliases[j].desc,
aliases[j].long_desc,
aliases[j].encoding_desc);
zfree(&aliases[j].name);
zfree(&aliases[j].alias);
zfree(&aliases[j].scale_unit);
zfree(&aliases[j].desc);
zfree(&aliases[j].long_desc);
zfree(&aliases[j].encoding_desc);
zfree(&aliases[j].topic);
zfree(&aliases[j].pmu_name);
}
if (printed && pager_in_use())
printf("\n");
zfree(&aliases);
}
bool perf_pmus__have_event(const char *pname, const char *name)
{
struct perf_pmu *pmu = perf_pmus__find(pname);
return pmu && perf_pmu__have_event(pmu, name);
}
int perf_pmus__num_core_pmus(void)
{
static int count;
if (!count) {
struct perf_pmu *pmu = NULL;
while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
count++;
}
return count;
}
static bool __perf_pmus__supports_extended_type(void)
{
struct perf_pmu *pmu = NULL;
if (perf_pmus__num_core_pmus() <= 1)
return false;
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
if (!is_event_supported(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
return false;
}
return true;
}
static bool perf_pmus__do_support_extended_type;
static void perf_pmus__init_supports_extended_type(void)
{
perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
}
bool perf_pmus__supports_extended_type(void)
{
static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;
pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);
return perf_pmus__do_support_extended_type;
}
char *perf_pmus__default_pmu_name(void)
{
int fd;
DIR *dir;
struct dirent *dent;
char *result = NULL;
if (!list_empty(&core_pmus))
return strdup(list_first_entry(&core_pmus, struct perf_pmu, list)->name);
fd = perf_pmu__event_source_devices_fd();
if (fd < 0)
return strdup("cpu");
dir = fdopendir(fd);
if (!dir) {
close(fd);
return strdup("cpu");
}
while ((dent = readdir(dir))) {
if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
continue;
if (is_pmu_core(dent->d_name)) {
result = strdup(dent->d_name);
break;
}
}
closedir(dir);
return result ?: strdup("cpu");
}
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
struct perf_pmu *pmu = evsel->pmu;
if (!pmu) {
pmu = perf_pmus__find_by_type(evsel->core.attr.type);
((struct evsel *)evsel)->pmu = pmu;
}
return pmu;
}
| linux-master | tools/perf/util/pmus.c |
// SPDX-License-Identifier: GPL-2.0
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "symbol.h"
#include "demangle-java.h"
#include <linux/ctype.h>
#include <linux/kernel.h>
enum {
MODE_PREFIX = 0,
MODE_CLASS = 1,
MODE_FUNC = 2,
MODE_TYPE = 3,
MODE_CTYPE = 4, /* class arg */
};
#define BASE_ENT(c, n) [c - 'A']=n
static const char *base_types['Z' - 'A' + 1] = {
BASE_ENT('B', "byte" ),
BASE_ENT('C', "char" ),
BASE_ENT('D', "double" ),
BASE_ENT('F', "float" ),
BASE_ENT('I', "int" ),
BASE_ENT('J', "long" ),
BASE_ENT('S', "short" ),
BASE_ENT('Z', "boolean" ),
};
/*
 * Demangle a Java symbol between the str and end positions and store
 * up to maxlen characters into buf. The parser starts in mode.
 *
 * Use MODE_PREFIX to process the entire prototype up to the end position.
 * Use MODE_TYPE to process the return type, if str starts on a return type char.
*
* Return:
* success: buf
* error : NULL
*/
static char *
__demangle_java_sym(const char *str, const char *end, char *buf, int maxlen, int mode)
{
int rlen = 0;
int array = 0;
int narg = 0;
const char *q;
if (!end)
end = str + strlen(str);
for (q = str; q != end; q++) {
if (rlen == (maxlen - 1))
break;
switch (*q) {
case 'L':
if (mode == MODE_PREFIX || mode == MODE_TYPE) {
if (mode == MODE_TYPE) {
if (narg)
rlen += scnprintf(buf + rlen, maxlen - rlen, ", ");
narg++;
}
if (mode == MODE_PREFIX)
mode = MODE_CLASS;
else
mode = MODE_CTYPE;
} else
buf[rlen++] = *q;
break;
case 'B':
case 'C':
case 'D':
case 'F':
case 'I':
case 'J':
case 'S':
case 'Z':
if (mode == MODE_TYPE) {
if (narg)
rlen += scnprintf(buf + rlen, maxlen - rlen, ", ");
rlen += scnprintf(buf + rlen, maxlen - rlen, "%s", base_types[*q - 'A']);
while (array--)
rlen += scnprintf(buf + rlen, maxlen - rlen, "[]");
array = 0;
narg++;
} else
buf[rlen++] = *q;
break;
case 'V':
if (mode == MODE_TYPE) {
rlen += scnprintf(buf + rlen, maxlen - rlen, "void");
while (array--)
rlen += scnprintf(buf + rlen, maxlen - rlen, "[]");
array = 0;
} else
buf[rlen++] = *q;
break;
case '[':
if (mode != MODE_TYPE)
goto error;
array++;
break;
case '(':
if (mode != MODE_FUNC)
goto error;
buf[rlen++] = *q;
mode = MODE_TYPE;
break;
case ')':
if (mode != MODE_TYPE)
goto error;
buf[rlen++] = *q;
narg = 0;
break;
case ';':
if (mode != MODE_CLASS && mode != MODE_CTYPE)
goto error;
/* safe because at least one other char to process */
if (isalpha(*(q + 1)) && mode == MODE_CLASS)
rlen += scnprintf(buf + rlen, maxlen - rlen, ".");
if (mode == MODE_CLASS)
mode = MODE_FUNC;
else if (mode == MODE_CTYPE)
mode = MODE_TYPE;
break;
case '/':
if (mode != MODE_CLASS && mode != MODE_CTYPE)
goto error;
rlen += scnprintf(buf + rlen, maxlen - rlen, ".");
break;
default :
buf[rlen++] = *q;
}
}
buf[rlen] = '\0';
return buf;
error:
return NULL;
}
/*
* Demangle Java function signature (openJDK, not GCJ)
* input:
* str: string to parse. String is not modified
* flags: combination of JAVA_DEMANGLE_* flags to modify demangling
* return:
* if input can be demangled, then a newly allocated string is returned.
* if input cannot be demangled, then NULL is returned
*
* Note: caller is responsible for freeing demangled string
*/
char *
java_demangle_sym(const char *str, int flags)
{
char *buf, *ptr;
char *p;
size_t len, l1 = 0;
if (!str)
return NULL;
/* find start of return type */
p = strrchr(str, ')');
if (!p)
return NULL;
/*
 * expansion factor estimated at 3x
*/
len = strlen(str) * 3 + 1;
buf = malloc(len);
if (!buf)
return NULL;
buf[0] = '\0';
if (!(flags & JAVA_DEMANGLE_NORET)) {
/*
* get return type first
*/
ptr = __demangle_java_sym(p + 1, NULL, buf, len, MODE_TYPE);
if (!ptr)
goto error;
/* add space between return type and function prototype */
l1 = strlen(buf);
buf[l1++] = ' ';
}
/* process function up to return type */
ptr = __demangle_java_sym(str, p + 1, buf + l1, len - l1, MODE_PREFIX);
if (!ptr)
goto error;
return buf;
error:
free(buf);
return NULL;
}
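/*
 * Worked example (illustrative sketch): an openJDK symbol such as
 *
 *	"Ljava/lang/String;indexOf(I)I"
 *
 * demangles to "int java.lang.String.indexOf(int)" (without the leading
 * return type when JAVA_DEMANGLE_NORET is set): the return type after ')'
 * is emitted first, then the class/function part with '/' and ';' rewritten
 * to '.'.
 */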
| linux-master | tools/perf/util/demangle-java.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Simple pointer stack
*
* (c) 2010 Arnaldo Carvalho de Melo <[email protected]>
*/
#include "pstack.h"
#include "debug.h"
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
struct pstack {
unsigned short top;
unsigned short max_nr_entries;
void *entries[];
};
struct pstack *pstack__new(unsigned short max_nr_entries)
{
struct pstack *pstack = zalloc((sizeof(*pstack) +
max_nr_entries * sizeof(void *)));
if (pstack != NULL)
pstack->max_nr_entries = max_nr_entries;
return pstack;
}
void pstack__delete(struct pstack *pstack)
{
free(pstack);
}
bool pstack__empty(const struct pstack *pstack)
{
return pstack->top == 0;
}
void pstack__remove(struct pstack *pstack, void *key)
{
unsigned short i = pstack->top, last_index = pstack->top - 1;
while (i-- != 0) {
if (pstack->entries[i] == key) {
if (i < last_index)
memmove(pstack->entries + i,
pstack->entries + i + 1,
(last_index - i) * sizeof(void *));
--pstack->top;
return;
}
}
pr_err("%s: %p not on the pstack!\n", __func__, key);
}
void pstack__push(struct pstack *pstack, void *key)
{
if (pstack->top == pstack->max_nr_entries) {
pr_err("%s: top=%d, overflow!\n", __func__, pstack->top);
return;
}
pstack->entries[pstack->top++] = key;
}
void *pstack__pop(struct pstack *pstack)
{
void *ret;
if (pstack->top == 0) {
pr_err("%s: underflow!\n", __func__);
return NULL;
}
ret = pstack->entries[--pstack->top];
pstack->entries[pstack->top] = NULL;
return ret;
}
void *pstack__peek(struct pstack *pstack)
{
if (pstack->top == 0)
return NULL;
return pstack->entries[pstack->top - 1];
}
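/*
 * Usage sketch: a bounded LIFO of opaque pointers, used for example by the
 * TUI hists browser to track zoomed-in filters:
 *
 *	struct pstack *ps = pstack__new(2);
 *
 *	pstack__push(ps, thread);		// remember a filter
 *	if (pstack__peek(ps) == thread)		// inspect the top without popping
 *		pstack__pop(ps);		// undo the filter
 *	pstack__delete(ps);
 */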
| linux-master | tools/perf/util/pstack.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Manage printing of source lines
* Copyright (c) 2017, Intel Corporation.
* Author: Andi Kleen
*/
#include <linux/list.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <string.h>
#include "srccode.h"
#include "debug.h"
#include <internal/lib.h> // page_size
#include "fncache.h"
#define MAXSRCCACHE (32*1024*1024)
#define MAXSRCFILES 64
#define SRC_HTAB_SZ 64
struct srcfile {
struct hlist_node hash_nd;
struct list_head nd;
char *fn;
char **lines;
char *map;
unsigned numlines;
size_t maplen;
};
static struct hlist_head srcfile_htab[SRC_HTAB_SZ];
static LIST_HEAD(srcfile_list);
static long map_total_sz;
static int num_srcfiles;
static int countlines(char *map, int maplen)
{
int numl;
char *end = map + maplen;
char *p = map;
if (maplen == 0)
return 0;
numl = 0;
while (p < end && (p = memchr(p, '\n', end - p)) != NULL) {
numl++;
p++;
}
if (p < end)
numl++;
return numl;
}
static void fill_lines(char **lines, int maxline, char *map, int maplen)
{
int l;
char *end = map + maplen;
char *p = map;
if (maplen == 0 || maxline == 0)
return;
l = 0;
lines[l++] = map;
while (p < end && (p = memchr(p, '\n', end - p)) != NULL) {
if (l >= maxline)
return;
lines[l++] = ++p;
}
	/*
	 * A trailing line without '\n' already had its start recorded above,
	 * and writing lines[l] here could overrun the countlines() allocation.
	 */
}
static void free_srcfile(struct srcfile *sf)
{
list_del_init(&sf->nd);
hlist_del(&sf->hash_nd);
map_total_sz -= sf->maplen;
munmap(sf->map, sf->maplen);
zfree(&sf->lines);
zfree(&sf->fn);
free(sf);
num_srcfiles--;
}
static struct srcfile *find_srcfile(char *fn)
{
struct stat st;
struct srcfile *h;
int fd;
unsigned long sz;
unsigned hval = shash((unsigned char *)fn) % SRC_HTAB_SZ;
hlist_for_each_entry (h, &srcfile_htab[hval], hash_nd) {
if (!strcmp(fn, h->fn)) {
/* Move to front */
list_move(&h->nd, &srcfile_list);
return h;
}
}
/* Only prune if there is more than one entry */
while ((num_srcfiles > MAXSRCFILES || map_total_sz > MAXSRCCACHE) &&
srcfile_list.next != &srcfile_list) {
assert(!list_empty(&srcfile_list));
h = list_entry(srcfile_list.prev, struct srcfile, nd);
free_srcfile(h);
}
	fd = open(fn, O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0) {
		pr_debug("cannot open source file %s\n", fn);
		if (fd >= 0)
			close(fd); /* don't leak the fd on fstat() failure */
		return NULL;
	}
	h = malloc(sizeof(struct srcfile));
	if (!h) {
		close(fd);
		return NULL;
	}
	h->fn = strdup(fn);
	if (!h->fn) {
		close(fd);
		goto out_h;
	}
h->maplen = st.st_size;
sz = (h->maplen + page_size - 1) & ~(page_size - 1);
h->map = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
close(fd);
	if (h->map == MAP_FAILED) {
pr_debug("cannot mmap source file %s\n", fn);
goto out_fn;
}
h->numlines = countlines(h->map, h->maplen);
h->lines = calloc(h->numlines, sizeof(char *));
if (!h->lines)
goto out_map;
fill_lines(h->lines, h->numlines, h->map, h->maplen);
list_add(&h->nd, &srcfile_list);
hlist_add_head(&h->hash_nd, &srcfile_htab[hval]);
map_total_sz += h->maplen;
num_srcfiles++;
return h;
out_map:
munmap(h->map, sz);
out_fn:
zfree(&h->fn);
out_h:
free(h);
return NULL;
}
/* Result is not 0 terminated */
char *find_sourceline(char *fn, unsigned line, int *lenp)
{
char *l, *p;
struct srcfile *sf = find_srcfile(fn);
if (!sf)
return NULL;
line--;
if (line >= sf->numlines)
return NULL;
l = sf->lines[line];
if (!l)
return NULL;
	p = memchr(l, '\n', sf->map + sf->maplen - l);
	/* the last line may not be newline terminated */
	*lenp = p ? p - l : sf->map + sf->maplen - l;
return l;
}
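/*
 * Usage sketch: callers get a pointer into the mmap'ed file plus an explicit
 * length, since the returned line is not 0 terminated:
 *
 *	int len;
 *	char *line = find_sourceline("/tmp/foo.c", 12, &len);
 *
 *	if (line)
 *		printf("%.*s\n", len, line);
 */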
| linux-master | tools/perf/util/srccode.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2017 Hari Bathini, IBM Corporation
*/
#include "namespaces.h"
#include "event.h"
#include "get_current_dir_name.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <limits.h>
#include <sched.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <asm/bug.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
static const char *perf_ns__names[] = {
[NET_NS_INDEX] = "net",
[UTS_NS_INDEX] = "uts",
[IPC_NS_INDEX] = "ipc",
[PID_NS_INDEX] = "pid",
[USER_NS_INDEX] = "user",
[MNT_NS_INDEX] = "mnt",
[CGROUP_NS_INDEX] = "cgroup",
};
const char *perf_ns__name(unsigned int id)
{
if (id >= ARRAY_SIZE(perf_ns__names))
return "UNKNOWN";
return perf_ns__names[id];
}
struct namespaces *namespaces__new(struct perf_record_namespaces *event)
{
struct namespaces *namespaces;
u64 link_info_size = ((event ? event->nr_namespaces : NR_NAMESPACES) *
sizeof(struct perf_ns_link_info));
namespaces = zalloc(sizeof(struct namespaces) + link_info_size);
if (!namespaces)
return NULL;
namespaces->end_time = -1;
if (event)
memcpy(namespaces->link_info, event->link_info, link_info_size);
return namespaces;
}
void namespaces__free(struct namespaces *namespaces)
{
free(namespaces);
}
static int nsinfo__get_nspid(pid_t *tgid, pid_t *nstgid, bool *in_pidns, const char *path)
{
FILE *f = NULL;
char *statln = NULL;
size_t linesz = 0;
char *nspid;
f = fopen(path, "r");
if (f == NULL)
return -1;
while (getline(&statln, &linesz, f) != -1) {
/* Use tgid if CONFIG_PID_NS is not defined. */
if (strstr(statln, "Tgid:") != NULL) {
*tgid = (pid_t)strtol(strrchr(statln, '\t'), NULL, 10);
*nstgid = *tgid;
}
if (strstr(statln, "NStgid:") != NULL) {
nspid = strrchr(statln, '\t');
*nstgid = (pid_t)strtol(nspid, NULL, 10);
/*
* If innermost tgid is not the first, process is in a different
* PID namespace.
*/
*in_pidns = (statln + sizeof("NStgid:") - 1) != nspid;
break;
}
}
fclose(f);
free(statln);
return 0;
}
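/*
 * Illustrative input (sketch): for a process inside a child PID namespace,
 * /proc/<pid>/status contains lines like
 *
 *	Tgid:	1234
 *	NStgid:	1234	1
 *
 * strrchr(statln, '\t') picks the last (innermost) value, so *tgid == 1234,
 * *nstgid == 1, and *in_pidns == true because the innermost tgid is not the
 * first NStgid entry.
 */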
int nsinfo__init(struct nsinfo *nsi)
{
char oldns[PATH_MAX];
char spath[PATH_MAX];
char *newns = NULL;
struct stat old_stat;
struct stat new_stat;
int rv = -1;
if (snprintf(oldns, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX)
return rv;
if (asprintf(&newns, "/proc/%d/ns/mnt", nsinfo__pid(nsi)) == -1)
return rv;
if (stat(oldns, &old_stat) < 0)
goto out;
if (stat(newns, &new_stat) < 0)
goto out;
/* Check if the mount namespaces differ, if so then indicate that we
* want to switch as part of looking up dso/map data.
*/
if (old_stat.st_ino != new_stat.st_ino) {
RC_CHK_ACCESS(nsi)->need_setns = true;
RC_CHK_ACCESS(nsi)->mntns_path = newns;
newns = NULL;
}
/* If we're dealing with a process that is in a different PID namespace,
* attempt to work out the innermost tgid for the process.
*/
if (snprintf(spath, PATH_MAX, "/proc/%d/status", nsinfo__pid(nsi)) >= PATH_MAX)
goto out;
rv = nsinfo__get_nspid(&RC_CHK_ACCESS(nsi)->tgid, &RC_CHK_ACCESS(nsi)->nstgid,
&RC_CHK_ACCESS(nsi)->in_pidns, spath);
out:
free(newns);
return rv;
}
static struct nsinfo *nsinfo__alloc(void)
{
struct nsinfo *res;
RC_STRUCT(nsinfo) *nsi;
nsi = calloc(1, sizeof(*nsi));
if (ADD_RC_CHK(res, nsi))
refcount_set(&nsi->refcnt, 1);
return res;
}
struct nsinfo *nsinfo__new(pid_t pid)
{
struct nsinfo *nsi;
if (pid == 0)
return NULL;
nsi = nsinfo__alloc();
if (!nsi)
return NULL;
RC_CHK_ACCESS(nsi)->pid = pid;
RC_CHK_ACCESS(nsi)->tgid = pid;
RC_CHK_ACCESS(nsi)->nstgid = pid;
nsinfo__clear_need_setns(nsi);
RC_CHK_ACCESS(nsi)->in_pidns = false;
/* Init may fail if the process exits while we're trying to look at its
* proc information. In that case, save the pid but don't try to enter
* the namespace.
*/
if (nsinfo__init(nsi) == -1)
nsinfo__clear_need_setns(nsi);
return nsi;
}
static const char *nsinfo__mntns_path(const struct nsinfo *nsi)
{
return RC_CHK_ACCESS(nsi)->mntns_path;
}
struct nsinfo *nsinfo__copy(const struct nsinfo *nsi)
{
struct nsinfo *nnsi;
if (nsi == NULL)
return NULL;
nnsi = nsinfo__alloc();
if (!nnsi)
return NULL;
RC_CHK_ACCESS(nnsi)->pid = nsinfo__pid(nsi);
RC_CHK_ACCESS(nnsi)->tgid = nsinfo__tgid(nsi);
RC_CHK_ACCESS(nnsi)->nstgid = nsinfo__nstgid(nsi);
RC_CHK_ACCESS(nnsi)->need_setns = nsinfo__need_setns(nsi);
RC_CHK_ACCESS(nnsi)->in_pidns = nsinfo__in_pidns(nsi);
if (nsinfo__mntns_path(nsi)) {
RC_CHK_ACCESS(nnsi)->mntns_path = strdup(nsinfo__mntns_path(nsi));
if (!RC_CHK_ACCESS(nnsi)->mntns_path) {
nsinfo__put(nnsi);
return NULL;
}
}
return nnsi;
}
static refcount_t *nsinfo__refcnt(struct nsinfo *nsi)
{
return &RC_CHK_ACCESS(nsi)->refcnt;
}
static void nsinfo__delete(struct nsinfo *nsi)
{
if (nsi) {
WARN_ONCE(refcount_read(nsinfo__refcnt(nsi)) != 0, "nsinfo refcnt unbalanced\n");
zfree(&RC_CHK_ACCESS(nsi)->mntns_path);
RC_CHK_FREE(nsi);
}
}
struct nsinfo *nsinfo__get(struct nsinfo *nsi)
{
struct nsinfo *result;
if (RC_CHK_GET(result, nsi))
refcount_inc(nsinfo__refcnt(nsi));
return result;
}
void nsinfo__put(struct nsinfo *nsi)
{
if (nsi && refcount_dec_and_test(nsinfo__refcnt(nsi)))
nsinfo__delete(nsi);
else
RC_CHK_PUT(nsi);
}
bool nsinfo__need_setns(const struct nsinfo *nsi)
{
return RC_CHK_ACCESS(nsi)->need_setns;
}
void nsinfo__clear_need_setns(struct nsinfo *nsi)
{
RC_CHK_ACCESS(nsi)->need_setns = false;
}
pid_t nsinfo__tgid(const struct nsinfo *nsi)
{
return RC_CHK_ACCESS(nsi)->tgid;
}
pid_t nsinfo__nstgid(const struct nsinfo *nsi)
{
return RC_CHK_ACCESS(nsi)->nstgid;
}
pid_t nsinfo__pid(const struct nsinfo *nsi)
{
return RC_CHK_ACCESS(nsi)->pid;
}
pid_t nsinfo__in_pidns(const struct nsinfo *nsi)
{
return RC_CHK_ACCESS(nsi)->in_pidns;
}
void nsinfo__mountns_enter(struct nsinfo *nsi,
struct nscookie *nc)
{
char curpath[PATH_MAX];
int oldns = -1;
int newns = -1;
char *oldcwd = NULL;
if (nc == NULL)
return;
nc->oldns = -1;
nc->newns = -1;
if (!nsi || !nsinfo__need_setns(nsi))
return;
if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX)
return;
oldcwd = get_current_dir_name();
if (!oldcwd)
return;
oldns = open(curpath, O_RDONLY);
if (oldns < 0)
goto errout;
newns = open(nsinfo__mntns_path(nsi), O_RDONLY);
if (newns < 0)
goto errout;
if (setns(newns, CLONE_NEWNS) < 0)
goto errout;
nc->oldcwd = oldcwd;
nc->oldns = oldns;
nc->newns = newns;
return;
errout:
free(oldcwd);
if (oldns > -1)
close(oldns);
if (newns > -1)
close(newns);
}
void nsinfo__mountns_exit(struct nscookie *nc)
{
if (nc == NULL || nc->oldns == -1 || nc->newns == -1 || !nc->oldcwd)
return;
setns(nc->oldns, CLONE_NEWNS);
if (nc->oldcwd) {
WARN_ON_ONCE(chdir(nc->oldcwd));
zfree(&nc->oldcwd);
}
if (nc->oldns > -1) {
close(nc->oldns);
nc->oldns = -1;
}
if (nc->newns > -1) {
close(nc->newns);
nc->newns = -1;
}
}
char *nsinfo__realpath(const char *path, struct nsinfo *nsi)
{
char *rpath;
struct nscookie nsc;
nsinfo__mountns_enter(nsi, &nsc);
rpath = realpath(path, NULL);
nsinfo__mountns_exit(&nsc);
return rpath;
}
int nsinfo__stat(const char *filename, struct stat *st, struct nsinfo *nsi)
{
int ret;
struct nscookie nsc;
nsinfo__mountns_enter(nsi, &nsc);
ret = stat(filename, st);
nsinfo__mountns_exit(&nsc);
return ret;
}
bool nsinfo__is_in_root_namespace(void)
{
pid_t tgid = 0, nstgid = 0;
bool in_pidns = false;
nsinfo__get_nspid(&tgid, &nstgid, &in_pidns, "/proc/self/status");
return !in_pidns;
}
| linux-master | tools/perf/util/namespaces.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AMD specific. Provide textual annotation for IBS raw sample data.
*/
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <linux/string.h>
#include "../../arch/x86/include/asm/amd-ibs.h"
#include "debug.h"
#include "session.h"
#include "evlist.h"
#include "sample-raw.h"
#include "util/sample.h"
static u32 cpu_family, cpu_model, ibs_fetch_type, ibs_op_type;
static bool zen4_ibs_extensions;
static void pr_ibs_fetch_ctl(union ibs_fetch_ctl reg)
{
const char * const ic_miss_strs[] = {
" IcMiss 0",
" IcMiss 1",
};
const char * const l1tlb_pgsz_strs[] = {
" L1TlbPgSz 4KB",
" L1TlbPgSz 2MB",
" L1TlbPgSz 1GB",
" L1TlbPgSz RESERVED"
};
const char * const l1tlb_pgsz_strs_erratum1347[] = {
" L1TlbPgSz 4KB",
" L1TlbPgSz 16KB",
" L1TlbPgSz 2MB",
" L1TlbPgSz 1GB"
};
const char *ic_miss_str = NULL;
const char *l1tlb_pgsz_str = NULL;
char l3_miss_str[sizeof(" L3MissOnly _ FetchOcMiss _ FetchL3Miss _")] = "";
if (cpu_family == 0x19 && cpu_model < 0x10) {
/*
* Erratum #1238 workaround is to ignore MSRC001_1030[IbsIcMiss]
* Erratum #1347 workaround is to use table provided in erratum
*/
if (reg.phy_addr_valid)
l1tlb_pgsz_str = l1tlb_pgsz_strs_erratum1347[reg.l1tlb_pgsz];
} else {
if (reg.phy_addr_valid)
l1tlb_pgsz_str = l1tlb_pgsz_strs[reg.l1tlb_pgsz];
ic_miss_str = ic_miss_strs[reg.ic_miss];
}
if (zen4_ibs_extensions) {
snprintf(l3_miss_str, sizeof(l3_miss_str),
" L3MissOnly %d FetchOcMiss %d FetchL3Miss %d",
reg.l3_miss_only, reg.fetch_oc_miss, reg.fetch_l3_miss);
}
printf("ibs_fetch_ctl:\t%016llx MaxCnt %7d Cnt %7d Lat %5d En %d Val %d Comp %d%s "
"PhyAddrValid %d%s L1TlbMiss %d L2TlbMiss %d RandEn %d%s%s\n",
reg.val, reg.fetch_maxcnt << 4, reg.fetch_cnt << 4, reg.fetch_lat,
reg.fetch_en, reg.fetch_val, reg.fetch_comp, ic_miss_str ? : "",
reg.phy_addr_valid, l1tlb_pgsz_str ? : "", reg.l1tlb_miss, reg.l2tlb_miss,
reg.rand_en, reg.fetch_comp ? (reg.fetch_l2_miss ? " L2Miss 1" : " L2Miss 0") : "",
l3_miss_str);
}
static void pr_ic_ibs_extd_ctl(union ic_ibs_extd_ctl reg)
{
printf("ic_ibs_ext_ctl:\t%016llx IbsItlbRefillLat %3d\n", reg.val, reg.itlb_refill_lat);
}
static void pr_ibs_op_ctl(union ibs_op_ctl reg)
{
char l3_miss_only[sizeof(" L3MissOnly _")] = "";
if (zen4_ibs_extensions)
snprintf(l3_miss_only, sizeof(l3_miss_only), " L3MissOnly %d", reg.l3_miss_only);
printf("ibs_op_ctl:\t%016llx MaxCnt %9d%s En %d Val %d CntCtl %d=%s CurCnt %9d\n",
reg.val, ((reg.opmaxcnt_ext << 16) | reg.opmaxcnt) << 4, l3_miss_only,
reg.op_en, reg.op_val, reg.cnt_ctl,
reg.cnt_ctl ? "uOps" : "cycles", reg.opcurcnt);
}
static void pr_ibs_op_data(union ibs_op_data reg)
{
printf("ibs_op_data:\t%016llx CompToRetCtr %5d TagToRetCtr %5d%s%s%s BrnRet %d "
" RipInvalid %d BrnFuse %d Microcode %d\n",
reg.val, reg.comp_to_ret_ctr, reg.tag_to_ret_ctr,
reg.op_brn_ret ? (reg.op_return ? " OpReturn 1" : " OpReturn 0") : "",
reg.op_brn_ret ? (reg.op_brn_taken ? " OpBrnTaken 1" : " OpBrnTaken 0") : "",
reg.op_brn_ret ? (reg.op_brn_misp ? " OpBrnMisp 1" : " OpBrnMisp 0") : "",
reg.op_brn_ret, reg.op_rip_invalid, reg.op_brn_fuse, reg.op_microcode);
}
static void pr_ibs_op_data2_extended(union ibs_op_data2 reg)
{
static const char * const data_src_str[] = {
"",
" DataSrc 1=Local L3 or other L1/L2 in CCX",
" DataSrc 2=Another CCX cache in the same NUMA node",
" DataSrc 3=DRAM",
" DataSrc 4=(reserved)",
" DataSrc 5=Another CCX cache in a different NUMA node",
" DataSrc 6=Long-latency DIMM",
" DataSrc 7=MMIO/Config/PCI/APIC",
" DataSrc 8=Extension Memory",
" DataSrc 9=(reserved)",
" DataSrc 10=(reserved)",
" DataSrc 11=(reserved)",
" DataSrc 12=Coherent Memory of a different processor type",
/* 13 to 31 are reserved. Avoid printing them. */
};
int data_src = (reg.data_src_hi << 3) | reg.data_src_lo;
printf("ibs_op_data2:\t%016llx %sRmtNode %d%s\n", reg.val,
(data_src == 1 || data_src == 2 || data_src == 5) ?
(reg.cache_hit_st ? "CacheHitSt 1=O-State " : "CacheHitSt 0=M-state ") : "",
reg.rmt_node,
data_src < (int)ARRAY_SIZE(data_src_str) ? data_src_str[data_src] : "");
}
static void pr_ibs_op_data2_default(union ibs_op_data2 reg)
{
static const char * const data_src_str[] = {
"",
" DataSrc 1=(reserved)",
" DataSrc 2=Local node cache",
" DataSrc 3=DRAM",
" DataSrc 4=Remote node cache",
" DataSrc 5=(reserved)",
" DataSrc 6=(reserved)",
" DataSrc 7=Other"
};
printf("ibs_op_data2:\t%016llx %sRmtNode %d%s\n", reg.val,
reg.data_src_lo == 2 ? (reg.cache_hit_st ? "CacheHitSt 1=O-State "
: "CacheHitSt 0=M-state ") : "",
reg.rmt_node, data_src_str[reg.data_src_lo]);
}
static void pr_ibs_op_data2(union ibs_op_data2 reg)
{
if (zen4_ibs_extensions)
return pr_ibs_op_data2_extended(reg);
pr_ibs_op_data2_default(reg);
}
static void pr_ibs_op_data3(union ibs_op_data3 reg)
{
char l2_miss_str[sizeof(" L2Miss _")] = "";
char op_mem_width_str[sizeof(" OpMemWidth _____ bytes")] = "";
char op_dc_miss_open_mem_reqs_str[sizeof(" OpDcMissOpenMemReqs __")] = "";
/*
* Erratum #1293
* Ignore L2Miss and OpDcMissOpenMemReqs (and opdata2) if DcMissNoMabAlloc or SwPf set
*/
if (!(cpu_family == 0x19 && cpu_model < 0x10 && (reg.dc_miss_no_mab_alloc || reg.sw_pf))) {
snprintf(l2_miss_str, sizeof(l2_miss_str), " L2Miss %d", reg.l2_miss);
snprintf(op_dc_miss_open_mem_reqs_str, sizeof(op_dc_miss_open_mem_reqs_str),
" OpDcMissOpenMemReqs %2d", reg.op_dc_miss_open_mem_reqs);
}
if (reg.op_mem_width)
snprintf(op_mem_width_str, sizeof(op_mem_width_str),
" OpMemWidth %2d bytes", 1 << (reg.op_mem_width - 1));
printf("ibs_op_data3:\t%016llx LdOp %d StOp %d DcL1TlbMiss %d DcL2TlbMiss %d "
"DcL1TlbHit2M %d DcL1TlbHit1G %d DcL2TlbHit2M %d DcMiss %d DcMisAcc %d "
"DcWcMemAcc %d DcUcMemAcc %d DcLockedOp %d DcMissNoMabAlloc %d DcLinAddrValid %d "
"DcPhyAddrValid %d DcL2TlbHit1G %d%s SwPf %d%s%s DcMissLat %5d TlbRefillLat %5d\n",
reg.val, reg.ld_op, reg.st_op, reg.dc_l1tlb_miss, reg.dc_l2tlb_miss,
reg.dc_l1tlb_hit_2m, reg.dc_l1tlb_hit_1g, reg.dc_l2tlb_hit_2m, reg.dc_miss,
reg.dc_mis_acc, reg.dc_wc_mem_acc, reg.dc_uc_mem_acc, reg.dc_locked_op,
reg.dc_miss_no_mab_alloc, reg.dc_lin_addr_valid, reg.dc_phy_addr_valid,
reg.dc_l2_tlb_hit_1g, l2_miss_str, reg.sw_pf, op_mem_width_str,
op_dc_miss_open_mem_reqs_str, reg.dc_miss_lat, reg.tlb_refill_lat);
}
/*
* IBS Op/Execution MSRs always saved, in order, are:
* IBS_OP_CTL, IBS_OP_RIP, IBS_OP_DATA, IBS_OP_DATA2,
* IBS_OP_DATA3, IBS_DC_LINADDR, IBS_DC_PHYSADDR, BP_IBSTGT_RIP
*/
static void amd_dump_ibs_op(struct perf_sample *sample)
{
struct perf_ibs_data *data = sample->raw_data;
union ibs_op_ctl *op_ctl = (union ibs_op_ctl *)data->data;
__u64 *rip = (__u64 *)op_ctl + 1;
union ibs_op_data *op_data = (union ibs_op_data *)(rip + 1);
union ibs_op_data3 *op_data3 = (union ibs_op_data3 *)(rip + 3);
pr_ibs_op_ctl(*op_ctl);
if (!op_data->op_rip_invalid)
printf("IbsOpRip:\t%016llx\n", *rip);
pr_ibs_op_data(*op_data);
/*
* Erratum #1293: ignore op_data2 if DcMissNoMabAlloc or SwPf are set
*/
if (!(cpu_family == 0x19 && cpu_model < 0x10 &&
(op_data3->dc_miss_no_mab_alloc || op_data3->sw_pf)))
pr_ibs_op_data2(*(union ibs_op_data2 *)(rip + 2));
pr_ibs_op_data3(*op_data3);
if (op_data3->dc_lin_addr_valid)
printf("IbsDCLinAd:\t%016llx\n", *(rip + 4));
if (op_data3->dc_phy_addr_valid)
printf("IbsDCPhysAd:\t%016llx\n", *(rip + 5));
if (op_data->op_brn_ret && *(rip + 6))
printf("IbsBrTarget:\t%016llx\n", *(rip + 6));
}
/*
* IBS Fetch MSRs always saved, in order, are:
* IBS_FETCH_CTL, IBS_FETCH_LINADDR, IBS_FETCH_PHYSADDR, IC_IBS_EXTD_CTL
*/
static void amd_dump_ibs_fetch(struct perf_sample *sample)
{
struct perf_ibs_data *data = sample->raw_data;
union ibs_fetch_ctl *fetch_ctl = (union ibs_fetch_ctl *)data->data;
__u64 *addr = (__u64 *)fetch_ctl + 1;
union ic_ibs_extd_ctl *extd_ctl = (union ic_ibs_extd_ctl *)addr + 2;
pr_ibs_fetch_ctl(*fetch_ctl);
printf("IbsFetchLinAd:\t%016llx\n", *addr++);
if (fetch_ctl->phy_addr_valid)
printf("IbsFetchPhysAd:\t%016llx\n", *addr);
pr_ic_ibs_extd_ctl(*extd_ctl);
}
/*
* Test for enable and valid bits in captured control MSRs.
*/
static bool is_valid_ibs_fetch_sample(struct perf_sample *sample)
{
struct perf_ibs_data *data = sample->raw_data;
union ibs_fetch_ctl *fetch_ctl = (union ibs_fetch_ctl *)data->data;
if (fetch_ctl->fetch_en && fetch_ctl->fetch_val)
return true;
return false;
}
static bool is_valid_ibs_op_sample(struct perf_sample *sample)
{
struct perf_ibs_data *data = sample->raw_data;
union ibs_op_ctl *op_ctl = (union ibs_op_ctl *)data->data;
if (op_ctl->op_en && op_ctl->op_val)
return true;
return false;
}
/*
* AMD vendor specific raw sample function. Check for PERF_RECORD_SAMPLE
* events and, if the event was triggered by IBS, display its raw data with
* decoded text. The function is only invoked when the dump flag -D is set.
*/
void evlist__amd_sample_raw(struct evlist *evlist, union perf_event *event,
struct perf_sample *sample)
{
struct evsel *evsel;
if (event->header.type != PERF_RECORD_SAMPLE || !sample->raw_size)
return;
evsel = evlist__event2evsel(evlist, event);
if (!evsel)
return;
if (evsel->core.attr.type == ibs_fetch_type) {
if (!is_valid_ibs_fetch_sample(sample)) {
pr_debug("Invalid raw IBS Fetch MSR data encountered\n");
return;
}
amd_dump_ibs_fetch(sample);
} else if (evsel->core.attr.type == ibs_op_type) {
if (!is_valid_ibs_op_sample(sample)) {
pr_debug("Invalid raw IBS Op MSR data encountered\n");
return;
}
amd_dump_ibs_op(sample);
}
}
static void parse_cpuid(struct perf_env *env)
{
const char *cpuid;
int ret;
cpuid = perf_env__cpuid(env);
/*
* cpuid = "AuthenticAMD,family,model,stepping"
*/
ret = sscanf(cpuid, "%*[^,],%u,%u", &cpu_family, &cpu_model);
if (ret != 2)
pr_debug("problem parsing cpuid\n");
}
/*
* Find and assign the type number used for ibs_op or ibs_fetch samples.
* Device names can be large - we are only interested in the first 9 characters,
* to match "ibs_fetch".
*/
bool evlist__has_amd_ibs(struct evlist *evlist)
{
struct perf_env *env = evlist->env;
int ret, nr_pmu_mappings = perf_env__nr_pmu_mappings(env);
const char *pmu_mapping = perf_env__pmu_mappings(env);
char name[sizeof("ibs_fetch")];
u32 type;
while (nr_pmu_mappings--) {
ret = sscanf(pmu_mapping, "%u:%9s", &type, name);
if (ret == 2) {
if (strstarts(name, "ibs_op"))
ibs_op_type = type;
else if (strstarts(name, "ibs_fetch"))
ibs_fetch_type = type;
}
pmu_mapping += strlen(pmu_mapping) + 1 /* '\0' */;
}
if (perf_env__find_pmu_cap(env, "ibs_op", "zen4_ibs_extensions"))
zen4_ibs_extensions = 1;
if (ibs_fetch_type || ibs_op_type) {
if (!cpu_family)
parse_cpuid(env);
return true;
}
return false;
}
| linux-master | tools/perf/util/amd-sample-raw.c |
// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>
#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"
static struct lock_contention_bpf *skel;
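/*
* Open and load the lock contention BPF skeleton: size its maps according
* to the aggregation mode, resolve symbol filters to kernel addresses and
* fill the CPU/task/type/address filter maps before attaching.
*/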
int lock_contention_prepare(struct lock_contention *con)
{
int i, fd;
int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1;
struct evlist *evlist = con->evlist;
struct target *target = con->target;
skel = lock_contention_bpf__open();
if (!skel) {
pr_err("Failed to open lock-contention BPF skeleton\n");
return -1;
}
bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);
if (con->aggr_mode == LOCK_AGGR_TASK)
bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
else
bpf_map__set_max_entries(skel->maps.task_data, 1);
if (con->save_callstack)
bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
else
bpf_map__set_max_entries(skel->maps.stacks, 1);
if (target__has_cpu(target))
ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
if (target__has_task(target))
ntasks = perf_thread_map__nr(evlist->core.threads);
if (con->filters->nr_types)
ntypes = con->filters->nr_types;
/* resolve lock name filters to addr */
if (con->filters->nr_syms) {
struct symbol *sym;
struct map *kmap;
unsigned long *addrs;
for (i = 0; i < con->filters->nr_syms; i++) {
sym = machine__find_kernel_symbol_by_name(con->machine,
con->filters->syms[i],
&kmap);
if (sym == NULL) {
pr_warning("ignore unknown symbol: %s\n",
con->filters->syms[i]);
continue;
}
addrs = realloc(con->filters->addrs,
(con->filters->nr_addrs + 1) * sizeof(*addrs));
if (addrs == NULL) {
pr_warning("memory allocation failure\n");
continue;
}
addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
con->filters->addrs = addrs;
}
naddrs = con->filters->nr_addrs;
}
bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
if (lock_contention_bpf__load(skel) < 0) {
pr_err("Failed to load lock-contention BPF skeleton\n");
return -1;
}
if (target__has_cpu(target)) {
u32 cpu;
u8 val = 1;
skel->bss->has_cpu = 1;
fd = bpf_map__fd(skel->maps.cpu_filter);
for (i = 0; i < ncpus; i++) {
cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
}
}
if (target__has_task(target)) {
u32 pid;
u8 val = 1;
skel->bss->has_task = 1;
fd = bpf_map__fd(skel->maps.task_filter);
for (i = 0; i < ntasks; i++) {
pid = perf_thread_map__pid(evlist->core.threads, i);
bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
}
}
if (target__none(target) && evlist->workload.pid > 0) {
u32 pid = evlist->workload.pid;
u8 val = 1;
skel->bss->has_task = 1;
fd = bpf_map__fd(skel->maps.task_filter);
bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
}
if (con->filters->nr_types) {
u8 val = 1;
skel->bss->has_type = 1;
fd = bpf_map__fd(skel->maps.type_filter);
for (i = 0; i < con->filters->nr_types; i++)
bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
}
if (con->filters->nr_addrs) {
u8 val = 1;
skel->bss->has_addr = 1;
fd = bpf_map__fd(skel->maps.addr_filter);
for (i = 0; i < con->filters->nr_addrs; i++)
bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
}
/* these don't work well if in the rodata section */
skel->bss->stack_skip = con->stack_skip;
skel->bss->aggr_mode = con->aggr_mode;
skel->bss->needs_callstack = con->save_callstack;
skel->bss->lock_owner = con->owner;
bpf_program__set_autoload(skel->progs.collect_lock_syms, false);
lock_contention_bpf__attach(skel);
return 0;
}
int lock_contention_start(void)
{
skel->bss->enabled = 1;
return 0;
}
int lock_contention_stop(void)
{
skel->bss->enabled = 0;
return 0;
}
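/*
* Build a display name for a contention entry: the task comm for per-task
* aggregation, a lock symbol or well-known lock name for per-address
* aggregation, or the first caller outside the locking internals for
* per-caller aggregation.
*/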
static const char *lock_contention_get_name(struct lock_contention *con,
struct contention_key *key,
u64 *stack_trace, u32 flags)
{
int idx = 0;
u64 addr;
const char *name = "";
static char name_buf[KSYM_NAME_LEN];
struct symbol *sym;
struct map *kmap;
struct machine *machine = con->machine;
if (con->aggr_mode == LOCK_AGGR_TASK) {
struct contention_task_data task;
int pid = key->pid;
int task_fd = bpf_map__fd(skel->maps.task_data);
/* do not update idle comm which contains CPU number */
if (pid) {
struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid);
if (t == NULL)
return name;
if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
thread__set_comm(t, task.comm, /*timestamp=*/0))
name = task.comm;
}
return name;
}
if (con->aggr_mode == LOCK_AGGR_ADDR) {
int lock_fd = bpf_map__fd(skel->maps.lock_syms);
/* per-process locks set upper bits of the flags */
if (flags & LCD_F_MMAP_LOCK)
return "mmap_lock";
if (flags & LCD_F_SIGHAND_LOCK)
return "siglock";
/* global locks with symbols */
sym = machine__find_kernel_symbol(machine, key->lock_addr, &kmap);
if (sym)
return sym->name;
/* try semi-global locks collected separately */
if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr, &flags)) {
if (flags == LOCK_CLASS_RQLOCK)
return "rq_lock";
}
return "";
}
/* LOCK_AGGR_CALLER: skip lock internal functions */
while (machine__is_lock_function(machine, stack_trace[idx]) &&
idx < con->max_stack - 1)
idx++;
addr = stack_trace[idx];
sym = machine__find_kernel_symbol(machine, addr, &kmap);
if (sym) {
unsigned long offset;
offset = map__map_ip(kmap, addr) - sym->start;
if (offset == 0)
return sym->name;
snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
} else {
snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
}
return name_buf;
}
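/*
* Drain the BPF lock_stat map into lock_stat entries, merging data into
* existing entries and honoring the optional callstack filter.
*/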
int lock_contention_read(struct lock_contention *con)
{
int fd, stack, err = 0;
struct contention_key *prev_key, key = {};
struct contention_data data = {};
struct lock_stat *st = NULL;
struct machine *machine = con->machine;
u64 *stack_trace;
size_t stack_size = con->max_stack * sizeof(*stack_trace);
fd = bpf_map__fd(skel->maps.lock_stat);
stack = bpf_map__fd(skel->maps.stacks);
con->fails.task = skel->bss->task_fail;
con->fails.stack = skel->bss->stack_fail;
con->fails.time = skel->bss->time_fail;
con->fails.data = skel->bss->data_fail;
stack_trace = zalloc(stack_size);
if (stack_trace == NULL)
return -1;
if (con->aggr_mode == LOCK_AGGR_TASK) {
struct thread *idle = __machine__findnew_thread(machine,
/*pid=*/0,
/*tid=*/0);
thread__set_comm(idle, "swapper", /*timestamp=*/0);
}
if (con->aggr_mode == LOCK_AGGR_ADDR) {
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.flags = BPF_F_TEST_RUN_ON_CPU,
);
int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);
bpf_prog_test_run_opts(prog_fd, &opts);
}
/* make sure it loads the kernel map */
map__load(maps__first(machine->kmaps)->map);
prev_key = NULL;
while (!bpf_map_get_next_key(fd, prev_key, &key)) {
s64 ls_key;
const char *name;
/* to handle errors in the loop body */
err = -1;
bpf_map_lookup_elem(fd, &key, &data);
if (con->save_callstack) {
bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);
if (!match_callstack_filter(machine, stack_trace)) {
con->nr_filtered += data.count;
goto next;
}
}
switch (con->aggr_mode) {
case LOCK_AGGR_CALLER:
ls_key = key.stack_id;
break;
case LOCK_AGGR_TASK:
ls_key = key.pid;
break;
case LOCK_AGGR_ADDR:
ls_key = key.lock_addr;
break;
default:
goto next;
}
st = lock_stat_find(ls_key);
if (st != NULL) {
st->wait_time_total += data.total_time;
if (st->wait_time_max < data.max_time)
st->wait_time_max = data.max_time;
if (st->wait_time_min > data.min_time)
st->wait_time_min = data.min_time;
st->nr_contended += data.count;
if (st->nr_contended)
st->avg_wait_time = st->wait_time_total / st->nr_contended;
goto next;
}
name = lock_contention_get_name(con, &key, stack_trace, data.flags);
st = lock_stat_findnew(ls_key, name, data.flags);
if (st == NULL)
break;
st->nr_contended = data.count;
st->wait_time_total = data.total_time;
st->wait_time_max = data.max_time;
st->wait_time_min = data.min_time;
if (data.count)
st->avg_wait_time = data.total_time / data.count;
if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
st->callstack = memdup(stack_trace, stack_size);
if (st->callstack == NULL)
break;
}
next:
prev_key = &key;
/* we're fine now, reset the error */
err = 0;
}
free(stack_trace);
return err;
}
int lock_contention_finish(void)
{
if (skel) {
skel->bss->enabled = 0;
lock_contention_bpf__destroy(skel);
}
return 0;
}
| linux-master | tools/perf/util/bpf_lock_contention.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"
#include <api/fs/fs.h>
int thread__init_maps(struct thread *thread, struct machine *machine)
{
pid_t pid = thread__pid(thread);
if (pid == thread__tid(thread) || pid == -1) {
thread__set_maps(thread, maps__new(machine));
} else {
struct thread *leader = __machine__findnew_thread(machine, pid, pid);
if (leader) {
thread__set_maps(thread, maps__get(thread__maps(leader)));
thread__put(leader);
}
}
return thread__maps(thread) ? 0 : -1;
}
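/*
* Allocate a new thread with a default ":<tid>" comm, empty namespace and
* comm lists, and a single reference owned by the caller.
*/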
struct thread *thread__new(pid_t pid, pid_t tid)
{
char *comm_str;
struct comm *comm;
RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
struct thread *thread;
if (ADD_RC_CHK(thread, _thread) != NULL) {
thread__set_pid(thread, pid);
thread__set_tid(thread, tid);
thread__set_ppid(thread, -1);
thread__set_cpu(thread, -1);
thread__set_guest_cpu(thread, -1);
thread__set_lbr_stitch_enable(thread, false);
INIT_LIST_HEAD(thread__namespaces_list(thread));
INIT_LIST_HEAD(thread__comm_list(thread));
init_rwsem(thread__namespaces_lock(thread));
init_rwsem(thread__comm_lock(thread));
comm_str = malloc(32);
if (!comm_str)
goto err_thread;
snprintf(comm_str, 32, ":%d", tid);
comm = comm__new(comm_str, 0, false);
free(comm_str);
if (!comm)
goto err_thread;
list_add(&comm->list, thread__comm_list(thread));
refcount_set(thread__refcnt(thread), 1);
/* Thread holds first ref to nsdata. */
RC_CHK_ACCESS(thread)->nsinfo = nsinfo__new(pid);
srccode_state_init(thread__srccode_state(thread));
}
return thread;
err_thread:
free(thread);
return NULL;
}
static void (*thread__priv_destructor)(void *priv);
void thread__set_priv_destructor(void (*destructor)(void *priv))
{
assert(thread__priv_destructor == NULL);
thread__priv_destructor = destructor;
}
void thread__delete(struct thread *thread)
{
struct namespaces *namespaces, *tmp_namespaces;
struct comm *comm, *tmp_comm;
thread_stack__free(thread);
if (thread__maps(thread)) {
maps__put(thread__maps(thread));
thread__set_maps(thread, NULL);
}
down_write(thread__namespaces_lock(thread));
list_for_each_entry_safe(namespaces, tmp_namespaces,
thread__namespaces_list(thread), list) {
list_del_init(&namespaces->list);
namespaces__free(namespaces);
}
up_write(thread__namespaces_lock(thread));
down_write(thread__comm_lock(thread));
list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
list_del_init(&comm->list);
comm__free(comm);
}
up_write(thread__comm_lock(thread));
nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo);
srccode_state_free(thread__srccode_state(thread));
exit_rwsem(thread__namespaces_lock(thread));
exit_rwsem(thread__comm_lock(thread));
thread__free_stitch_list(thread);
if (thread__priv_destructor)
thread__priv_destructor(thread__priv(thread));
RC_CHK_FREE(thread);
}
struct thread *thread__get(struct thread *thread)
{
struct thread *result;
if (RC_CHK_GET(result, thread))
refcount_inc(thread__refcnt(thread));
return result;
}
void thread__put(struct thread *thread)
{
if (thread && refcount_dec_and_test(thread__refcnt(thread)))
thread__delete(thread);
else
RC_CHK_PUT(thread);
}
static struct namespaces *__thread__namespaces(struct thread *thread)
{
if (list_empty(thread__namespaces_list(thread)))
return NULL;
return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
}
struct namespaces *thread__namespaces(struct thread *thread)
{
struct namespaces *ns;
down_read(thread__namespaces_lock(thread));
ns = __thread__namespaces(thread);
up_read(thread__namespaces_lock(thread));
return ns;
}
static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
struct perf_record_namespaces *event)
{
struct namespaces *new, *curr = __thread__namespaces(thread);
new = namespaces__new(event);
if (!new)
return -ENOMEM;
list_add(&new->list, thread__namespaces_list(thread));
if (timestamp && curr) {
/*
* A setns syscall must have changed a few or all of this
* thread's namespaces. Update the end time of the namespaces
* previously in use.
*/
curr = list_next_entry(new, list);
curr->end_time = timestamp;
}
return 0;
}
int thread__set_namespaces(struct thread *thread, u64 timestamp,
struct perf_record_namespaces *event)
{
int ret;
down_write(thread__namespaces_lock(thread));
ret = __thread__set_namespaces(thread, timestamp, event);
up_write(thread__namespaces_lock(thread));
return ret;
}
struct comm *thread__comm(struct thread *thread)
{
if (list_empty(thread__comm_list(thread)))
return NULL;
return list_first_entry(thread__comm_list(thread), struct comm, list);
}
struct comm *thread__exec_comm(struct thread *thread)
{
struct comm *comm, *last = NULL, *second_last = NULL;
list_for_each_entry(comm, thread__comm_list(thread), list) {
if (comm->exec)
return comm;
second_last = last;
last = comm;
}
/*
* 'last' with no start time might be the parent's comm of a synthesized
* thread (created by processing a synthesized fork event). For a main
* thread, that is very probably wrong. Prefer a later comm to avoid
* that case.
*/
if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
return second_last;
return last;
}
static int ____thread__set_comm(struct thread *thread, const char *str,
u64 timestamp, bool exec)
{
struct comm *new, *curr = thread__comm(thread);
/* Override the default :tid entry */
if (!thread__comm_set(thread)) {
int err = comm__override(curr, str, timestamp, exec);
if (err)
return err;
} else {
new = comm__new(str, timestamp, exec);
if (!new)
return -ENOMEM;
list_add(&new->list, thread__comm_list(thread));
if (exec)
unwind__flush_access(thread__maps(thread));
}
thread__set_comm_set(thread, true);
return 0;
}
int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
bool exec)
{
int ret;
down_write(thread__comm_lock(thread));
ret = ____thread__set_comm(thread, str, timestamp, exec);
up_write(thread__comm_lock(thread));
return ret;
}
int thread__set_comm_from_proc(struct thread *thread)
{
char path[64];
char *comm = NULL;
size_t sz;
int err = -1;
if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
procfs__read_str(path, &comm, &sz) == 0) {
comm[sz - 1] = '\0';
err = thread__set_comm(thread, comm, 0);
}
return err;
}
static const char *__thread__comm_str(struct thread *thread)
{
const struct comm *comm = thread__comm(thread);
if (!comm)
return NULL;
return comm__str(comm);
}
const char *thread__comm_str(struct thread *thread)
{
const char *str;
down_read(thread__comm_lock(thread));
str = __thread__comm_str(thread);
up_read(thread__comm_lock(thread));
return str;
}
static int __thread__comm_len(struct thread *thread, const char *comm)
{
if (!comm)
return 0;
thread__set_comm_len(thread, strlen(comm));
return thread__var_comm_len(thread);
}
/* CHECKME: it would probably be better to return the max comm len from the comm list */
int thread__comm_len(struct thread *thread)
{
int comm_len = thread__var_comm_len(thread);
if (!comm_len) {
const char *comm;
down_read(thread__comm_lock(thread));
comm = __thread__comm_str(thread);
comm_len = __thread__comm_len(thread, comm);
up_read(thread__comm_lock(thread));
}
return comm_len;
}
size_t thread__fprintf(struct thread *thread, FILE *fp)
{
return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
maps__fprintf(thread__maps(thread), fp);
}
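/*
* Insert a map into the thread's address space: prepare it for unwinding,
* fix up overlaps with existing maps, then add it to the maps tree.
*/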
int thread__insert_map(struct thread *thread, struct map *map)
{
int ret;
ret = unwind__prepare_access(thread__maps(thread), map, NULL);
if (ret)
return ret;
maps__fixup_overlappings(thread__maps(thread), map, stderr);
return maps__insert(thread__maps(thread), map);
}
static int __thread__prepare_access(struct thread *thread)
{
bool initialized = false;
int err = 0;
struct maps *maps = thread__maps(thread);
struct map_rb_node *rb_node;
down_read(maps__lock(maps));
maps__for_each_entry(maps, rb_node) {
err = unwind__prepare_access(thread__maps(thread), rb_node->map, &initialized);
if (err || initialized)
break;
}
up_read(maps__lock(maps));
return err;
}
static int thread__prepare_access(struct thread *thread)
{
int err = 0;
if (dwarf_callchain_users)
err = __thread__prepare_access(thread);
return err;
}
static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
/* This is a new thread; we share the process's map groups. */
if (thread__pid(thread) == thread__pid(parent))
return thread__prepare_access(thread);
if (thread__maps(thread) == thread__maps(parent)) {
pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
thread__pid(thread), thread__tid(thread),
thread__pid(parent), thread__tid(parent));
return 0;
}
/* But this one is a new process, so copy the maps. */
return do_maps_clone ? maps__clone(thread, thread__maps(parent)) : 0;
}
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
if (thread__comm_set(parent)) {
const char *comm = thread__comm_str(parent);
int err;
if (!comm)
return -ENOMEM;
err = thread__set_comm(thread, comm, timestamp);
if (err)
return err;
}
thread__set_ppid(thread, thread__tid(parent));
return thread__clone_maps(thread, parent, do_maps_clone);
}
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
struct addr_location *al)
{
size_t i;
const u8 cpumodes[] = {
PERF_RECORD_MISC_USER,
PERF_RECORD_MISC_KERNEL,
PERF_RECORD_MISC_GUEST_USER,
PERF_RECORD_MISC_GUEST_KERNEL
};
for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
thread__find_symbol(thread, cpumodes[i], addr, al);
if (al->map)
break;
}
}
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
if (thread__pid(thread) == thread__tid(thread))
return thread__get(thread);
if (thread__pid(thread) == -1)
return NULL;
return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
}
int thread__memcpy(struct thread *thread, struct machine *machine,
void *buf, u64 ip, int len, bool *is64bit)
{
u8 cpumode = PERF_RECORD_MISC_USER;
struct addr_location al;
struct dso *dso;
long offset;
if (machine__kernel_ip(machine, ip))
cpumode = PERF_RECORD_MISC_KERNEL;
addr_location__init(&al);
if (!thread__find_map(thread, cpumode, ip, &al)) {
addr_location__exit(&al);
return -1;
}
dso = map__dso(al.map);
if (!dso || dso->data.status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
addr_location__exit(&al);
return -1;
}
offset = map__map_ip(al.map, ip);
if (is64bit)
*is64bit = dso->is_64_bit;
addr_location__exit(&al);
return dso__data_read_offset(dso, machine, offset, buf, len);
}
void thread__free_stitch_list(struct thread *thread)
{
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
struct stitch_list *pos, *tmp;
if (!lbr_stitch)
return;
list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
list_del_init(&pos->node);
free(pos);
}
list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
list_del_init(&pos->node);
free(pos);
}
zfree(&lbr_stitch->prev_lbr_cursor);
free(thread__lbr_stitch(thread));
thread__set_lbr_stitch(thread, NULL);
}
| linux-master | tools/perf/util/thread.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009-2011, Frederic Weisbecker <[email protected]>
*
* Handle the callchains from the stream in an ad-hoc radix tree and then
* sort them in an rbtree.
*
* Using a radix tree for the code paths provides fast retrieval and
* factorizes memory use. It also lets us use the paths in a hierarchical
* graph view.
*
*/
#include <inttypes.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <math.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include "asm/bug.h"
#include "debug.h"
#include "dso.h"
#include "event.h"
#include "hist.h"
#include "sort.h"
#include "machine.h"
#include "map.h"
#include "callchain.h"
#include "branch.h"
#include "symbol.h"
#include "util.h"
#include "../perf.h"
#define CALLCHAIN_PARAM_DEFAULT \
.mode = CHAIN_GRAPH_ABS, \
.min_percent = 0.5, \
.order = ORDER_CALLEE, \
.key = CCKEY_FUNCTION, \
.value = CCVAL_PERCENT, \
struct callchain_param callchain_param = {
CALLCHAIN_PARAM_DEFAULT
};
/*
* Are there any events using DWARF callchains?
*
* I.e.
*
* -e cycles/call-graph=dwarf/
*/
bool dwarf_callchain_users;
struct callchain_param callchain_param_default = {
CALLCHAIN_PARAM_DEFAULT
};
/* Used for thread-local struct callchain_cursor. */
static pthread_key_t callchain_cursor;
int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
{
return parse_callchain_record(arg, param);
}
static int parse_callchain_mode(const char *value)
{
if (!strncmp(value, "graph", strlen(value))) {
callchain_param.mode = CHAIN_GRAPH_ABS;
return 0;
}
if (!strncmp(value, "flat", strlen(value))) {
callchain_param.mode = CHAIN_FLAT;
return 0;
}
if (!strncmp(value, "fractal", strlen(value))) {
callchain_param.mode = CHAIN_GRAPH_REL;
return 0;
}
if (!strncmp(value, "folded", strlen(value))) {
callchain_param.mode = CHAIN_FOLDED;
return 0;
}
return -1;
}
static int parse_callchain_order(const char *value)
{
if (!strncmp(value, "caller", strlen(value))) {
callchain_param.order = ORDER_CALLER;
callchain_param.order_set = true;
return 0;
}
if (!strncmp(value, "callee", strlen(value))) {
callchain_param.order = ORDER_CALLEE;
callchain_param.order_set = true;
return 0;
}
return -1;
}
static int parse_callchain_sort_key(const char *value)
{
if (!strncmp(value, "function", strlen(value))) {
callchain_param.key = CCKEY_FUNCTION;
return 0;
}
if (!strncmp(value, "address", strlen(value))) {
callchain_param.key = CCKEY_ADDRESS;
return 0;
}
if (!strncmp(value, "srcline", strlen(value))) {
callchain_param.key = CCKEY_SRCLINE;
return 0;
}
if (!strncmp(value, "branch", strlen(value))) {
callchain_param.branch_callstack = 1;
return 0;
}
return -1;
}
static int parse_callchain_value(const char *value)
{
if (!strncmp(value, "percent", strlen(value))) {
callchain_param.value = CCVAL_PERCENT;
return 0;
}
if (!strncmp(value, "period", strlen(value))) {
callchain_param.value = CCVAL_PERIOD;
return 0;
}
if (!strncmp(value, "count", strlen(value))) {
callchain_param.value = CCVAL_COUNT;
return 0;
}
return -1;
}
static int get_stack_size(const char *str, unsigned long *_size)
{
char *endptr;
unsigned long size;
unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
size = strtoul(str, &endptr, 0);
do {
if (*endptr)
break;
size = round_up(size, sizeof(u64));
if (!size || size > max_size)
break;
*_size = size;
return 0;
} while (0);
pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
max_size, str);
return -1;
}
static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{
char *tok;
char *endptr, *saveptr = NULL;
bool minpcnt_set = false;
bool record_opt_set = false;
bool try_stack_size = false;
callchain_param.enabled = true;
symbol_conf.use_callchain = true;
if (!arg)
return 0;
while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
if (!strncmp(tok, "none", strlen(tok))) {
callchain_param.mode = CHAIN_NONE;
callchain_param.enabled = false;
symbol_conf.use_callchain = false;
return 0;
}
if (!parse_callchain_mode(tok) ||
!parse_callchain_order(tok) ||
!parse_callchain_sort_key(tok) ||
!parse_callchain_value(tok)) {
/* parsing ok - move on to the next */
try_stack_size = false;
goto next;
} else if (allow_record_opt && !record_opt_set) {
if (parse_callchain_record(tok, &callchain_param))
goto try_numbers;
/* assume that a number following 'dwarf' is the stack size */
if (callchain_param.record_mode == CALLCHAIN_DWARF)
try_stack_size = true;
record_opt_set = true;
goto next;
}
try_numbers:
if (try_stack_size) {
unsigned long size = 0;
if (get_stack_size(tok, &size) < 0)
return -1;
callchain_param.dump_size = size;
try_stack_size = false;
} else if (!minpcnt_set) {
/* try to get the min percent */
callchain_param.min_percent = strtod(tok, &endptr);
if (tok == endptr)
return -1;
minpcnt_set = true;
} else {
/* try print limit at last */
callchain_param.print_limit = strtoul(tok, &endptr, 0);
if (tok == endptr)
return -1;
}
next:
arg = NULL;
}
if (callchain_register_param(&callchain_param) < 0) {
pr_err("Can't register callchain params\n");
return -1;
}
return 0;
}
int parse_callchain_report_opt(const char *arg)
{
return __parse_callchain_report_opt(arg, false);
}
int parse_callchain_top_opt(const char *arg)
{
return __parse_callchain_report_opt(arg, true);
}
int parse_callchain_record(const char *arg, struct callchain_param *param)
{
char *tok, *name, *saveptr = NULL;
char *buf;
int ret = -1;
/* We need a buffer that we know we can write to. */
buf = malloc(strlen(arg) + 1);
if (!buf)
return -ENOMEM;
strcpy(buf, arg);
tok = strtok_r((char *)buf, ",", &saveptr);
name = tok ? : (char *)buf;
do {
/* Framepointer style */
if (!strncmp(name, "fp", sizeof("fp"))) {
ret = 0;
param->record_mode = CALLCHAIN_FP;
tok = strtok_r(NULL, ",", &saveptr);
if (tok) {
unsigned long size;
size = strtoul(tok, &name, 0);
if (size < (unsigned) sysctl__max_stack())
param->max_stack = size;
}
break;
/* Dwarf style */
} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
const unsigned long default_stack_dump_size = 8192;
ret = 0;
param->record_mode = CALLCHAIN_DWARF;
param->dump_size = default_stack_dump_size;
dwarf_callchain_users = true;
tok = strtok_r(NULL, ",", &saveptr);
if (tok) {
unsigned long size = 0;
ret = get_stack_size(tok, &size);
param->dump_size = size;
}
} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
if (!strtok_r(NULL, ",", &saveptr)) {
param->record_mode = CALLCHAIN_LBR;
ret = 0;
} else
pr_err("callchain: No more arguments "
"needed for --call-graph lbr\n");
break;
} else {
pr_err("callchain: Unknown --call-graph option "
"value: %s\n", arg);
break;
}
} while (0);
free(buf);
return ret;
}
int perf_callchain_config(const char *var, const char *value)
{
char *endptr;
if (!strstarts(var, "call-graph."))
return 0;
var += sizeof("call-graph.") - 1;
if (!strcmp(var, "record-mode"))
return parse_callchain_record_opt(value, &callchain_param);
if (!strcmp(var, "dump-size")) {
unsigned long size = 0;
int ret;
ret = get_stack_size(value, &size);
callchain_param.dump_size = size;
return ret;
}
if (!strcmp(var, "print-type")){
int ret;
ret = parse_callchain_mode(value);
if (ret == -1)
pr_err("Invalid callchain mode: %s\n", value);
return ret;
}
if (!strcmp(var, "order")){
int ret;
ret = parse_callchain_order(value);
if (ret == -1)
pr_err("Invalid callchain order: %s\n", value);
return ret;
}
if (!strcmp(var, "sort-key")){
int ret;
ret = parse_callchain_sort_key(value);
if (ret == -1)
pr_err("Invalid callchain sort key: %s\n", value);
return ret;
}
if (!strcmp(var, "threshold")) {
callchain_param.min_percent = strtod(value, &endptr);
if (value == endptr) {
pr_err("Invalid callchain threshold: %s\n", value);
return -1;
}
}
if (!strcmp(var, "print-limit")) {
callchain_param.print_limit = strtod(value, &endptr);
if (value == endptr) {
pr_err("Invalid callchain print limit: %s\n", value);
return -1;
}
}
return 0;
}
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
enum chain_mode mode)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct callchain_node *rnode;
u64 chain_cumul = callchain_cumul_hits(chain);
while (*p) {
u64 rnode_cumul;
parent = *p;
rnode = rb_entry(parent, struct callchain_node, rb_node);
rnode_cumul = callchain_cumul_hits(rnode);
switch (mode) {
case CHAIN_FLAT:
case CHAIN_FOLDED:
if (rnode->hit < chain->hit)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
break;
case CHAIN_GRAPH_ABS: /* Fall through */
case CHAIN_GRAPH_REL:
if (rnode_cumul < chain_cumul)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
break;
case CHAIN_NONE:
default:
break;
}
}
rb_link_node(&chain->rb_node, parent, p);
rb_insert_color(&chain->rb_node, root);
}
static void
__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
u64 min_hit)
{
struct rb_node *n;
struct callchain_node *child;
n = rb_first(&node->rb_root_in);
while (n) {
child = rb_entry(n, struct callchain_node, rb_node_in);
n = rb_next(n);
__sort_chain_flat(rb_root, child, min_hit);
}
if (node->hit && node->hit >= min_hit)
rb_insert_callchain(rb_root, node, CHAIN_FLAT);
}
/*
* Once we have every callchain from the stream, we can
* sort them by hit
*/
static void
sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
u64 min_hit, struct callchain_param *param __maybe_unused)
{
*rb_root = RB_ROOT;
__sort_chain_flat(rb_root, &root->node, min_hit);
}
static void __sort_chain_graph_abs(struct callchain_node *node,
u64 min_hit)
{
struct rb_node *n;
struct callchain_node *child;
node->rb_root = RB_ROOT;
n = rb_first(&node->rb_root_in);
while (n) {
child = rb_entry(n, struct callchain_node, rb_node_in);
n = rb_next(n);
__sort_chain_graph_abs(child, min_hit);
if (callchain_cumul_hits(child) >= min_hit)
rb_insert_callchain(&node->rb_root, child,
CHAIN_GRAPH_ABS);
}
}
static void
sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
u64 min_hit, struct callchain_param *param __maybe_unused)
{
__sort_chain_graph_abs(&chain_root->node, min_hit);
rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
static void __sort_chain_graph_rel(struct callchain_node *node,
double min_percent)
{
struct rb_node *n;
struct callchain_node *child;
u64 min_hit;
node->rb_root = RB_ROOT;
min_hit = ceil(node->children_hit * min_percent);
n = rb_first(&node->rb_root_in);
while (n) {
child = rb_entry(n, struct callchain_node, rb_node_in);
n = rb_next(n);
__sort_chain_graph_rel(child, min_percent);
if (callchain_cumul_hits(child) >= min_hit)
rb_insert_callchain(&node->rb_root, child,
CHAIN_GRAPH_REL);
}
}
static void
sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
u64 min_hit __maybe_unused, struct callchain_param *param)
{
__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
int callchain_register_param(struct callchain_param *param)
{
switch (param->mode) {
case CHAIN_GRAPH_ABS:
param->sort = sort_chain_graph_abs;
break;
case CHAIN_GRAPH_REL:
param->sort = sort_chain_graph_rel;
break;
case CHAIN_FLAT:
case CHAIN_FOLDED:
param->sort = sort_chain_flat;
break;
case CHAIN_NONE:
default:
return -1;
}
return 0;
}
/*
* Create a child for a parent. If inherit_children, then the new child
* becomes the new parent of its parent's children.
*/
static struct callchain_node *
create_child(struct callchain_node *parent, bool inherit_children)
{
struct callchain_node *new;
new = zalloc(sizeof(*new));
if (!new) {
perror("not enough memory to create child for code path tree");
return NULL;
}
new->parent = parent;
INIT_LIST_HEAD(&new->val);
INIT_LIST_HEAD(&new->parent_val);
if (inherit_children) {
struct rb_node *n;
struct callchain_node *child;
new->rb_root_in = parent->rb_root_in;
parent->rb_root_in = RB_ROOT;
n = rb_first(&new->rb_root_in);
while (n) {
child = rb_entry(n, struct callchain_node, rb_node_in);
child->parent = new;
n = rb_next(n);
}
/* make it the first child */
rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
}
return new;
}
/*
* Fill the node with callchain values
*/
static int
fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
{
struct callchain_cursor_node *cursor_node;
node->val_nr = cursor->nr - cursor->pos;
if (!node->val_nr)
pr_warning("Warning: empty node in callchain tree\n");
cursor_node = callchain_cursor_current(cursor);
while (cursor_node) {
struct callchain_list *call;
call = zalloc(sizeof(*call));
if (!call) {
perror("not enough memory for the code path tree");
return -1;
}
call->ip = cursor_node->ip;
call->ms = cursor_node->ms;
call->ms.map = map__get(call->ms.map);
call->ms.maps = maps__get(call->ms.maps);
call->srcline = cursor_node->srcline;
if (cursor_node->branch) {
call->branch_count = 1;
if (cursor_node->branch_from) {
/*
* branch_from is set elsewhere, implying this
* entry is the "to" end of a branch.
*/
call->brtype_stat.branch_to = true;
if (cursor_node->branch_flags.predicted)
call->predicted_count = 1;
if (cursor_node->branch_flags.abort)
call->abort_count = 1;
branch_type_count(&call->brtype_stat,
&cursor_node->branch_flags,
cursor_node->branch_from,
cursor_node->ip);
} else {
/*
* It's the "from" end of a branch
*/
call->brtype_stat.branch_to = false;
call->cycles_count =
cursor_node->branch_flags.cycles;
call->iter_count = cursor_node->nr_loop_iter;
call->iter_cycles = cursor_node->iter_cycles;
}
}
list_add_tail(&call->list, &node->val);
callchain_cursor_advance(cursor);
cursor_node = callchain_cursor_current(cursor);
}
return 0;
}
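/*
* Create a leaf child of @parent holding the remaining cursor entries and
* account the sample period to it.
*/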
static struct callchain_node *
add_child(struct callchain_node *parent,
struct callchain_cursor *cursor,
u64 period)
{
struct callchain_node *new;
new = create_child(parent, false);
if (new == NULL)
return NULL;
if (fill_node(new, cursor) < 0) {
struct callchain_list *call, *tmp;
list_for_each_entry_safe(call, tmp, &new->val, list) {
list_del_init(&call->list);
map__zput(call->ms.map);
maps__zput(call->ms.maps);
free(call);
}
free(new);
return NULL;
}
new->children_hit = 0;
new->hit = period;
new->children_count = 0;
new->count = 1;
return new;
}
enum match_result {
MATCH_ERROR = -1,
MATCH_EQ,
MATCH_LT,
MATCH_GT,
};
static enum match_result match_chain_strings(const char *left,
const char *right)
{
enum match_result ret = MATCH_EQ;
int cmp;
if (left && right)
cmp = strcmp(left, right);
else if (!left && right)
cmp = 1;
else if (left && !right)
cmp = -1;
else
return MATCH_ERROR;
if (cmp != 0)
ret = cmp < 0 ? MATCH_LT : MATCH_GT;
return ret;
}
/*
* We always need to use relative addresses because we're aggregating
* callchains from multiple threads, i.e. different address spaces, so
* comparing absolute addresses makes no sense: a symbol in a DSO may end
* up at a different address when used in a different binary, or even in
* the same binary under some address randomization technique. Thus we
* compare just relative addresses. -acme
*/
static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
struct map *right_map, u64 right_ip)
{
struct dso *left_dso = left_map ? map__dso(left_map) : NULL;
struct dso *right_dso = right_map ? map__dso(right_map) : NULL;
if (left_dso != right_dso)
return left_dso < right_dso ? MATCH_LT : MATCH_GT;
if (left_ip != right_ip)
return left_ip < right_ip ? MATCH_LT : MATCH_GT;
return MATCH_EQ;
}
static enum match_result match_chain(struct callchain_cursor_node *node,
struct callchain_list *cnode)
{
enum match_result match = MATCH_ERROR;
switch (callchain_param.key) {
case CCKEY_SRCLINE:
match = match_chain_strings(cnode->srcline, node->srcline);
if (match != MATCH_ERROR)
break;
/* otherwise fall-back to symbol-based comparison below */
fallthrough;
case CCKEY_FUNCTION:
if (node->ms.sym && cnode->ms.sym) {
/*
* Compare inlined frames based on their symbol name
* because different inlined frames will have the same
* symbol start. Otherwise do a faster comparison based
* on the symbol start address.
*/
if (cnode->ms.sym->inlined || node->ms.sym->inlined) {
match = match_chain_strings(cnode->ms.sym->name,
node->ms.sym->name);
if (match != MATCH_ERROR)
break;
} else {
match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
node->ms.map, node->ms.sym->start);
break;
}
}
/* otherwise fall-back to IP-based comparison below */
fallthrough;
case CCKEY_ADDRESS:
default:
match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->ms.map, node->ip);
break;
}
if (match == MATCH_EQ && node->branch) {
cnode->branch_count++;
if (node->branch_from) {
/*
* It's the "to" end of a branch
*/
cnode->brtype_stat.branch_to = true;
if (node->branch_flags.predicted)
cnode->predicted_count++;
if (node->branch_flags.abort)
cnode->abort_count++;
branch_type_count(&cnode->brtype_stat,
&node->branch_flags,
node->branch_from,
node->ip);
} else {
/*
* It's the "from" end of a branch
*/
cnode->brtype_stat.branch_to = false;
cnode->cycles_count += node->branch_flags.cycles;
cnode->iter_count += node->nr_loop_iter;
cnode->iter_cycles += node->iter_cycles;
cnode->from_count++;
}
}
return match;
}
/*
* Split the parent in two parts (a new child is created) and
* give a part of its callchain to the created child.
* Then create another child to host the given callchain of the new branch.
*/
static int
split_add_child(struct callchain_node *parent,
struct callchain_cursor *cursor,
struct callchain_list *to_split,
u64 idx_parents, u64 idx_local, u64 period)
{
struct callchain_node *new;
struct list_head *old_tail;
unsigned int idx_total = idx_parents + idx_local;
/* split */
new = create_child(parent, true);
if (new == NULL)
return -1;
/* split the callchain and move a part to the new child */
old_tail = parent->val.prev;
list_del_range(&to_split->list, old_tail);
new->val.next = &to_split->list;
new->val.prev = old_tail;
to_split->list.prev = &new->val;
old_tail->next = &new->val;
/* split the hits */
new->hit = parent->hit;
new->children_hit = parent->children_hit;
parent->children_hit = callchain_cumul_hits(new);
new->val_nr = parent->val_nr - idx_local;
parent->val_nr = idx_local;
new->count = parent->count;
new->children_count = parent->children_count;
parent->children_count = callchain_cumul_counts(new);
/* create a new child for the new branch if any */
if (idx_total < cursor->nr) {
struct callchain_node *first;
struct callchain_list *cnode;
struct callchain_cursor_node *node;
struct rb_node *p, **pp;
parent->hit = 0;
parent->children_hit += period;
parent->count = 0;
parent->children_count += 1;
node = callchain_cursor_current(cursor);
new = add_child(parent, cursor, period);
if (new == NULL)
return -1;
/*
* This is the second child since we moved parent's children
* to new (first) child above.
*/
p = parent->rb_root_in.rb_node;
first = rb_entry(p, struct callchain_node, rb_node_in);
cnode = list_first_entry(&first->val, struct callchain_list,
list);
if (match_chain(node, cnode) == MATCH_LT)
pp = &p->rb_left;
else
pp = &p->rb_right;
rb_link_node(&new->rb_node_in, p, pp);
rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
} else {
parent->hit = period;
parent->count = 1;
}
return 0;
}
static enum match_result
append_chain(struct callchain_node *root,
struct callchain_cursor *cursor,
u64 period);
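/*
* Look up the cursor's first entry among root's children (an rbtree keyed
* on the first callchain entry): recurse into a matching child, otherwise
* add a brand new child node.
*/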
static int
append_chain_children(struct callchain_node *root,
struct callchain_cursor *cursor,
u64 period)
{
struct callchain_node *rnode;
struct callchain_cursor_node *node;
struct rb_node **p = &root->rb_root_in.rb_node;
struct rb_node *parent = NULL;
node = callchain_cursor_current(cursor);
if (!node)
return -1;
/* lookup in children */
while (*p) {
enum match_result ret;
parent = *p;
rnode = rb_entry(parent, struct callchain_node, rb_node_in);
/* If at least the first entry matches, recurse into the children */
ret = append_chain(rnode, cursor, period);
if (ret == MATCH_EQ)
goto inc_children_hit;
if (ret == MATCH_ERROR)
return -1;
if (ret == MATCH_LT)
p = &parent->rb_left;
else
p = &parent->rb_right;
}
/* nothing in children, add to the current node */
rnode = add_child(root, cursor, period);
if (rnode == NULL)
return -1;
rb_link_node(&rnode->rb_node_in, parent, p);
rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);
inc_children_hit:
root->children_hit += period;
root->children_count++;
return 0;
}
static enum match_result
append_chain(struct callchain_node *root,
struct callchain_cursor *cursor,
u64 period)
{
struct callchain_list *cnode;
u64 start = cursor->pos;
bool found = false;
u64 matches;
enum match_result cmp = MATCH_ERROR;
/*
* Lookup in the current node.
* If we have a symbol, compare against the symbol start so that
* any address inside the function matches, unless function mode
* is disabled.
*/
list_for_each_entry(cnode, &root->val, list) {
struct callchain_cursor_node *node;
node = callchain_cursor_current(cursor);
if (!node)
break;
cmp = match_chain(node, cnode);
if (cmp != MATCH_EQ)
break;
found = true;
callchain_cursor_advance(cursor);
}
/* no match, relay to the parent */
if (!found) {
WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
return cmp;
}
matches = cursor->pos - start;
/* we match only a part of the node. Split it and add the new chain */
if (matches < root->val_nr) {
if (split_add_child(root, cursor, cnode, start, matches,
period) < 0)
return MATCH_ERROR;
return MATCH_EQ;
}
/* we match 100% of the path, increment the hit */
if (matches == root->val_nr && cursor->pos == cursor->nr) {
root->hit += period;
root->count++;
return MATCH_EQ;
}
/* We match the node and still have a part remaining */
if (append_chain_children(root, cursor, period) < 0)
return MATCH_ERROR;
return MATCH_EQ;
}
int callchain_append(struct callchain_root *root,
struct callchain_cursor *cursor,
u64 period)
{
if (cursor == NULL)
return -1;
if (!cursor->nr)
return 0;
callchain_cursor_commit(cursor);
if (append_chain_children(&root->node, cursor, period) < 0)
return -1;
if (cursor->nr > root->max_depth)
root->max_depth = cursor->nr;
return 0;
}
static int
merge_chain_branch(struct callchain_cursor *cursor,
struct callchain_node *dst, struct callchain_node *src)
{
struct callchain_cursor_node **old_last = cursor->last;
struct callchain_node *child;
struct callchain_list *list, *next_list;
struct rb_node *n;
int old_pos = cursor->nr;
int err = 0;
list_for_each_entry_safe(list, next_list, &src->val, list) {
struct map_symbol ms = {
.maps = maps__get(list->ms.maps),
.map = map__get(list->ms.map),
};
callchain_cursor_append(cursor, list->ip, &ms, false, NULL, 0, 0, 0, list->srcline);
list_del_init(&list->list);
map__zput(ms.map);
maps__zput(ms.maps);
map__zput(list->ms.map);
maps__zput(list->ms.maps);
free(list);
}
if (src->hit) {
callchain_cursor_commit(cursor);
if (append_chain_children(dst, cursor, src->hit) < 0)
return -1;
}
n = rb_first(&src->rb_root_in);
while (n) {
child = container_of(n, struct callchain_node, rb_node_in);
n = rb_next(n);
rb_erase(&child->rb_node_in, &src->rb_root_in);
err = merge_chain_branch(cursor, dst, child);
if (err)
break;
free(child);
}
cursor->nr = old_pos;
cursor->last = old_last;
return err;
}
int callchain_merge(struct callchain_cursor *cursor,
struct callchain_root *dst, struct callchain_root *src)
{
return merge_chain_branch(cursor, &dst->node, &src->node);
}
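/*
* Append one frame to the cursor. Cursor nodes are recycled across
* samples, so reuse the node at the current position when one exists.
*/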
int callchain_cursor_append(struct callchain_cursor *cursor,
u64 ip, struct map_symbol *ms,
bool branch, struct branch_flags *flags,
int nr_loop_iter, u64 iter_cycles, u64 branch_from,
const char *srcline)
{
struct callchain_cursor_node *node = *cursor->last;
if (!node) {
node = calloc(1, sizeof(*node));
if (!node)
return -ENOMEM;
*cursor->last = node;
}
node->ip = ip;
maps__zput(node->ms.maps);
map__zput(node->ms.map);
node->ms = *ms;
node->ms.maps = maps__get(ms->maps);
node->ms.map = map__get(ms->map);
node->branch = branch;
node->nr_loop_iter = nr_loop_iter;
node->iter_cycles = iter_cycles;
node->srcline = srcline;
if (flags)
memcpy(&node->branch_flags, flags,
sizeof(struct branch_flags));
node->branch_from = branch_from;
cursor->nr++;
cursor->last = &node->next;
return 0;
}
int sample__resolve_callchain(struct perf_sample *sample,
struct callchain_cursor *cursor, struct symbol **parent,
struct evsel *evsel, struct addr_location *al,
int max_stack)
{
if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
return 0;
if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
return thread__resolve_callchain(al->thread, cursor, evsel, sample,
parent, al, max_stack);
}
return 0;
}
int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
{
if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
!symbol_conf.show_branchflag_count)
return 0;
return callchain_append(he->callchain, get_tls_callchain_cursor(), sample->period);
}
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
bool hide_unresolved)
{
struct machine *machine = maps__machine(node->ms.maps);
maps__put(al->maps);
al->maps = maps__get(node->ms.maps);
map__put(al->map);
al->map = map__get(node->ms.map);
al->sym = node->ms.sym;
al->srcline = node->srcline;
al->addr = node->ip;
if (al->sym == NULL) {
if (hide_unresolved)
return 0;
if (al->map == NULL)
goto out;
}
if (RC_CHK_ACCESS(al->maps) == RC_CHK_ACCESS(machine__kernel_maps(machine))) {
if (machine__is_host(machine)) {
al->cpumode = PERF_RECORD_MISC_KERNEL;
al->level = 'k';
} else {
al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
al->level = 'g';
}
} else {
if (machine__is_host(machine)) {
al->cpumode = PERF_RECORD_MISC_USER;
al->level = '.';
} else if (perf_guest) {
al->cpumode = PERF_RECORD_MISC_GUEST_USER;
al->level = 'u';
} else {
al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
al->level = 'H';
}
}
out:
return 1;
}
char *callchain_list__sym_name(struct callchain_list *cl,
char *bf, size_t bfsize, bool show_dso)
{
bool show_addr = callchain_param.key == CCKEY_ADDRESS;
bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
int printed;
if (cl->ms.sym) {
const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
if (show_srcline && cl->srcline)
printed = scnprintf(bf, bfsize, "%s %s%s",
cl->ms.sym->name, cl->srcline,
inlined);
else
printed = scnprintf(bf, bfsize, "%s%s",
cl->ms.sym->name, inlined);
} else
printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
if (show_dso)
scnprintf(bf + printed, bfsize - printed, " %s",
cl->ms.map ?
map__dso(cl->ms.map)->short_name :
"unknown");
return bf;
}
char *callchain_node__scnprintf_value(struct callchain_node *node,
char *bf, size_t bfsize, u64 total)
{
double percent = 0.0;
u64 period = callchain_cumul_hits(node);
unsigned count = callchain_cumul_counts(node);
if (callchain_param.mode == CHAIN_FOLDED) {
period = node->hit;
count = node->count;
}
switch (callchain_param.value) {
case CCVAL_PERIOD:
scnprintf(bf, bfsize, "%"PRIu64, period);
break;
case CCVAL_COUNT:
scnprintf(bf, bfsize, "%u", count);
break;
case CCVAL_PERCENT:
default:
if (total)
percent = period * 100.0 / total;
scnprintf(bf, bfsize, "%.2f%%", percent);
break;
}
return bf;
}
int callchain_node__fprintf_value(struct callchain_node *node,
FILE *fp, u64 total)
{
double percent = 0.0;
u64 period = callchain_cumul_hits(node);
unsigned count = callchain_cumul_counts(node);
if (callchain_param.mode == CHAIN_FOLDED) {
period = node->hit;
count = node->count;
}
switch (callchain_param.value) {
case CCVAL_PERIOD:
return fprintf(fp, "%"PRIu64, period);
case CCVAL_COUNT:
return fprintf(fp, "%u", count);
case CCVAL_PERCENT:
default:
if (total)
percent = period * 100.0 / total;
return percent_color_fprintf(fp, "%.2f%%", percent);
}
return 0;
}
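/*
* Sum the branch statistics accumulated in each callchain_list entry of
* a node.
*/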
static void callchain_counts_value(struct callchain_node *node,
u64 *branch_count, u64 *predicted_count,
u64 *abort_count, u64 *cycles_count)
{
struct callchain_list *clist;
list_for_each_entry(clist, &node->val, list) {
if (branch_count)
*branch_count += clist->branch_count;
if (predicted_count)
*predicted_count += clist->predicted_count;
if (abort_count)
*abort_count += clist->abort_count;
if (cycles_count)
*cycles_count += clist->cycles_count;
}
}
static int callchain_node_branch_counts_cumul(struct callchain_node *node,
u64 *branch_count,
u64 *predicted_count,
u64 *abort_count,
u64 *cycles_count)
{
struct callchain_node *child;
struct rb_node *n;
n = rb_first(&node->rb_root_in);
while (n) {
child = rb_entry(n, struct callchain_node, rb_node_in);
n = rb_next(n);
callchain_node_branch_counts_cumul(child, branch_count,
predicted_count,
abort_count,
cycles_count);
callchain_counts_value(child, branch_count,
predicted_count, abort_count,
cycles_count);
}
return 0;
}
int callchain_branch_counts(struct callchain_root *root,
u64 *branch_count, u64 *predicted_count,
u64 *abort_count, u64 *cycles_count)
{
if (branch_count)
*branch_count = 0;
if (predicted_count)
*predicted_count = 0;
if (abort_count)
*abort_count = 0;
if (cycles_count)
*cycles_count = 0;
return callchain_node_branch_counts_cumul(&root->node,
branch_count,
predicted_count,
abort_count,
cycles_count);
}
static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
{
return scnprintf(bf, bfsize, "%s%s:%" PRId64 "", (idx) ? " " : " (", str, value);
}
static int count_float_printf(int idx, const char *str, float value,
char *bf, int bfsize, float threshold)
{
if (threshold != 0.0 && value < threshold)
return 0;
return scnprintf(bf, bfsize, "%s%s:%.1f%%", (idx) ? " " : " (", str, value);
}
static int branch_to_str(char *bf, int bfsize,
u64 branch_count, u64 predicted_count,
u64 abort_count,
struct branch_type_stat *brtype_stat)
{
int printed, i = 0;
printed = branch_type_str(brtype_stat, bf, bfsize);
if (printed)
i++;
if (predicted_count < branch_count) {
printed += count_float_printf(i++, "predicted",
predicted_count * 100.0 / branch_count,
bf + printed, bfsize - printed, 0.0);
}
if (abort_count) {
printed += count_float_printf(i++, "abort",
abort_count * 100.0 / branch_count,
bf + printed, bfsize - printed, 0.1);
}
if (i)
printed += scnprintf(bf + printed, bfsize - printed, ")");
return printed;
}
static int branch_from_str(char *bf, int bfsize,
u64 branch_count,
u64 cycles_count, u64 iter_count,
u64 iter_cycles, u64 from_count)
{
int printed = 0, i = 0;
u64 cycles, v = 0;
cycles = cycles_count / branch_count;
if (cycles) {
printed += count_pri64_printf(i++, "cycles",
cycles,
bf + printed, bfsize - printed);
}
if (iter_count && from_count) {
v = iter_count / from_count;
if (v) {
printed += count_pri64_printf(i++, "iter",
v, bf + printed, bfsize - printed);
printed += count_pri64_printf(i++, "avg_cycles",
iter_cycles / iter_count,
bf + printed, bfsize - printed);
}
}
if (i)
printed += scnprintf(bf + printed, bfsize - printed, ")");
return printed;
}
static int counts_str_build(char *bf, int bfsize,
u64 branch_count, u64 predicted_count,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
u64 from_count,
struct branch_type_stat *brtype_stat)
{
int printed;
if (branch_count == 0)
return scnprintf(bf, bfsize, " (calltrace)");
if (brtype_stat->branch_to) {
printed = branch_to_str(bf, bfsize, branch_count,
predicted_count, abort_count, brtype_stat);
} else {
printed = branch_from_str(bf, bfsize, branch_count,
cycles_count, iter_count, iter_cycles,
from_count);
}
if (!printed)
bf[0] = 0;
return printed;
}
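/*
 * Illustrative example (editor's addition): for an entry with
 * branch_count = 1000, predicted_count = 920 and abort_count = 3, the
 * branch-to path appends something like " (predicted:92.0% abort:0.3%)"
 * to the line, while an entry with no branch info at all is tagged
 * " (calltrace)".
 */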
static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
u64 branch_count, u64 predicted_count,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
u64 from_count,
struct branch_type_stat *brtype_stat)
{
char str[256];
counts_str_build(str, sizeof(str), branch_count,
predicted_count, abort_count, cycles_count,
iter_count, iter_cycles, from_count, brtype_stat);
if (fp)
return fprintf(fp, "%s", str);
return scnprintf(bf, bfsize, "%s", str);
}
int callchain_list_counts__printf_value(struct callchain_list *clist,
FILE *fp, char *bf, int bfsize)
{
u64 branch_count, predicted_count;
u64 abort_count, cycles_count;
u64 iter_count, iter_cycles;
u64 from_count;
branch_count = clist->branch_count;
predicted_count = clist->predicted_count;
abort_count = clist->abort_count;
cycles_count = clist->cycles_count;
iter_count = clist->iter_count;
iter_cycles = clist->iter_cycles;
from_count = clist->from_count;
return callchain_counts_printf(fp, bf, bfsize, branch_count,
predicted_count, abort_count,
cycles_count, iter_count, iter_cycles,
from_count, &clist->brtype_stat);
}
static void free_callchain_node(struct callchain_node *node)
{
struct callchain_list *list, *tmp;
struct callchain_node *child;
struct rb_node *n;
list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
list_del_init(&list->list);
map__zput(list->ms.map);
maps__zput(list->ms.maps);
free(list);
}
list_for_each_entry_safe(list, tmp, &node->val, list) {
list_del_init(&list->list);
map__zput(list->ms.map);
maps__zput(list->ms.maps);
free(list);
}
n = rb_first(&node->rb_root_in);
while (n) {
child = container_of(n, struct callchain_node, rb_node_in);
n = rb_next(n);
rb_erase(&child->rb_node_in, &node->rb_root_in);
free_callchain_node(child);
free(child);
}
}
void free_callchain(struct callchain_root *root)
{
if (!symbol_conf.use_callchain)
return;
free_callchain_node(&root->node);
}
static u64 decay_callchain_node(struct callchain_node *node)
{
struct callchain_node *child;
struct rb_node *n;
u64 child_hits = 0;
n = rb_first(&node->rb_root_in);
while (n) {
child = container_of(n, struct callchain_node, rb_node_in);
child_hits += decay_callchain_node(child);
n = rb_next(n);
}
node->hit = (node->hit * 7) / 8;
node->children_hit = child_hits;
return node->hit;
}
void decay_callchain(struct callchain_root *root)
{
if (!symbol_conf.use_callchain)
return;
decay_callchain_node(&root->node);
}
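/*
 * Illustrative note (editor's addition): the 7/8 factor above is a simple
 * exponential decay used to age out stale samples between 'perf top'
 * refreshes; a node with hit == 800 decays to 700, then 612, then 535 on
 * successive passes, so chains that stop being sampled fade away quickly.
 */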
int callchain_node__make_parent_list(struct callchain_node *node)
{
struct callchain_node *parent = node->parent;
struct callchain_list *chain, *new;
LIST_HEAD(head);
while (parent) {
list_for_each_entry_reverse(chain, &parent->val, list) {
new = malloc(sizeof(*new));
if (new == NULL)
goto out;
*new = *chain;
new->has_children = false;
new->ms.map = map__get(new->ms.map);
list_add_tail(&new->list, &head);
}
parent = parent->parent;
}
list_for_each_entry_safe_reverse(chain, new, &head, list)
list_move_tail(&chain->list, &node->parent_val);
if (!list_empty(&node->parent_val)) {
chain = list_first_entry(&node->parent_val, struct callchain_list, list);
chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);
chain = list_first_entry(&node->val, struct callchain_list, list);
chain->has_children = false;
}
return 0;
out:
list_for_each_entry_safe(chain, new, &head, list) {
list_del_init(&chain->list);
map__zput(chain->ms.map);
maps__zput(chain->ms.maps);
free(chain);
}
return -ENOMEM;
}
static void callchain_cursor__delete(void *vcursor)
{
struct callchain_cursor *cursor = vcursor;
struct callchain_cursor_node *node, *next;
callchain_cursor_reset(cursor);
for (node = cursor->first; node != NULL; node = next) {
next = node->next;
free(node);
}
free(cursor);
}
static void init_callchain_cursor_key(void)
{
if (pthread_key_create(&callchain_cursor, callchain_cursor__delete)) {
pr_err("callchain cursor creation failed");
abort();
}
}
struct callchain_cursor *get_tls_callchain_cursor(void)
{
static pthread_once_t once_control = PTHREAD_ONCE_INIT;
struct callchain_cursor *cursor;
pthread_once(&once_control, init_callchain_cursor_key);
cursor = pthread_getspecific(callchain_cursor);
if (!cursor) {
cursor = zalloc(sizeof(*cursor));
if (!cursor)
pr_debug3("%s: not enough memory\n", __func__);
pthread_setspecific(callchain_cursor, cursor);
}
return cursor;
}
int callchain_cursor__copy(struct callchain_cursor *dst,
struct callchain_cursor *src)
{
int rc = 0;
callchain_cursor_reset(dst);
callchain_cursor_commit(src);
while (true) {
struct callchain_cursor_node *node;
node = callchain_cursor_current(src);
if (node == NULL)
break;
rc = callchain_cursor_append(dst, node->ip, &node->ms,
node->branch, &node->branch_flags,
node->nr_loop_iter,
node->iter_cycles,
node->branch_from, node->srcline);
if (rc)
break;
callchain_cursor_advance(src);
}
return rc;
}
/*
* Initialize a cursor before adding entries inside, but keep
* the previously allocated entries as a cache.
*/
void callchain_cursor_reset(struct callchain_cursor *cursor)
{
struct callchain_cursor_node *node;
cursor->nr = 0;
cursor->last = &cursor->first;
for (node = cursor->first; node != NULL; node = node->next) {
map__zput(node->ms.map);
maps__zput(node->ms.maps);
}
}
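/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * source): a cursor is reset, filled, committed and then iterated. 'ip',
 * 'ms' and 'srcline' stand in for real sample data here.
 *
 *	struct callchain_cursor *cursor = get_tls_callchain_cursor();
 *	struct callchain_cursor_node *node;
 *
 *	callchain_cursor_reset(cursor);
 *	callchain_cursor_append(cursor, ip, &ms, false, NULL, 0, 0, 0, srcline);
 *	callchain_cursor_commit(cursor);
 *	while ((node = callchain_cursor_current(cursor)) != NULL) {
 *		... use node->ip, node->ms.sym ...
 *		callchain_cursor_advance(cursor);
 *	}
 */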
void callchain_param_setup(u64 sample_type, const char *arch)
{
if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
(sample_type & PERF_SAMPLE_STACK_USER)) {
callchain_param.record_mode = CALLCHAIN_DWARF;
dwarf_callchain_users = true;
} else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
callchain_param.record_mode = CALLCHAIN_LBR;
else
callchain_param.record_mode = CALLCHAIN_FP;
}
/*
* It's necessary to use libunwind to reliably determine the caller of
* a leaf function on aarch64, as otherwise we cannot know whether to
* start from the LR or FP.
*
* Always starting from the LR can result in duplicate or entirely
* erroneous entries. Always skipping the LR and starting from the FP
* can result in missing entries.
*/
if (callchain_param.record_mode == CALLCHAIN_FP && !strcmp(arch, "arm64"))
dwarf_callchain_users = true;
}
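/*
 * Worked example (editor's addition): when callchains are enabled, a
 * sample_type with both PERF_SAMPLE_REGS_USER and PERF_SAMPLE_STACK_USER
 * selects CALLCHAIN_DWARF, PERF_SAMPLE_BRANCH_STACK alone selects
 * CALLCHAIN_LBR, and anything else falls back to frame-pointer
 * (CALLCHAIN_FP) unwinding.
 */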
static bool chain_match(struct callchain_list *base_chain,
struct callchain_list *pair_chain)
{
enum match_result match;
match = match_chain_strings(base_chain->srcline,
pair_chain->srcline);
if (match != MATCH_ERROR)
return match == MATCH_EQ;
match = match_chain_dso_addresses(base_chain->ms.map,
base_chain->ip,
pair_chain->ms.map,
pair_chain->ip);
return match == MATCH_EQ;
}
bool callchain_cnode_matched(struct callchain_node *base_cnode,
struct callchain_node *pair_cnode)
{
struct callchain_list *base_chain, *pair_chain;
bool match = false;
pair_chain = list_first_entry(&pair_cnode->val,
struct callchain_list,
list);
list_for_each_entry(base_chain, &base_cnode->val, list) {
if (&pair_chain->list == &pair_cnode->val)
return false;
if (!base_chain->srcline || !pair_chain->srcline) {
pair_chain = list_next_entry(pair_chain, list);
continue;
}
match = chain_match(base_chain, pair_chain);
if (!match)
return false;
pair_chain = list_next_entry(pair_chain, list);
}
/*
	 * If chain1 is ABC and chain2 is ABCD, they are not
	 * considered fully matched.
*/
if (pair_chain && (&pair_chain->list != &pair_cnode->val))
return false;
return match;
}
static u64 count_callchain_hits(struct hist_entry *he)
{
struct rb_root *root = &he->sorted_chain;
struct rb_node *rb_node = rb_first(root);
struct callchain_node *node;
u64 chain_hits = 0;
while (rb_node) {
node = rb_entry(rb_node, struct callchain_node, rb_node);
chain_hits += node->hit;
rb_node = rb_next(rb_node);
}
return chain_hits;
}
u64 callchain_total_hits(struct hists *hists)
{
struct rb_node *next = rb_first_cached(&hists->entries);
u64 chain_hits = 0;
while (next) {
struct hist_entry *he = rb_entry(next, struct hist_entry,
rb_node);
chain_hits += count_callchain_hits(he);
next = rb_next(&he->rb_node);
}
return chain_hits;
}
s64 callchain_avg_cycles(struct callchain_node *cnode)
{
struct callchain_list *chain;
s64 cycles = 0;
list_for_each_entry(chain, &cnode->val, list) {
if (chain->srcline && chain->branch_count)
cycles += chain->cycles_count / chain->branch_count;
}
return cycles;
}
| linux-master | tools/perf/util/callchain.c |
// SPDX-License-Identifier: GPL-2.0
#include "string2.h"
#include "strfilter.h"
#include <errno.h>
#include <stdlib.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/zalloc.h>
/* Operators */
static const char *OP_and = "&"; /* Logical AND */
static const char *OP_or = "|"; /* Logical OR */
static const char *OP_not = "!"; /* Logical NOT */
#define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!')
#define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')')
static void strfilter_node__delete(struct strfilter_node *node)
{
if (node) {
if (node->p && !is_operator(*node->p))
zfree((char **)&node->p);
strfilter_node__delete(node->l);
strfilter_node__delete(node->r);
free(node);
}
}
void strfilter__delete(struct strfilter *filter)
{
if (filter) {
strfilter_node__delete(filter->root);
free(filter);
}
}
static const char *get_token(const char *s, const char **e)
{
const char *p;
s = skip_spaces(s);
if (*s == '\0') {
p = s;
goto end;
}
p = s + 1;
if (!is_separator(*s)) {
		/* Find the end of this token */
retry:
while (*p && !is_separator(*p) && !isspace(*p))
p++;
/* Escape and special case: '!' is also used in glob pattern */
if (*(p - 1) == '\\' || (*p == '!' && *(p - 1) == '[')) {
p++;
goto retry;
}
}
end:
*e = p;
return s;
}
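/*
 * Illustrative example (editor's addition): tokenizing "func* & !*_raw"
 * yields "func*", "&", "!" and "*_raw". The retry path keeps a '!' that
 * sits inside glob brackets, so "[!a]bc" stays a single token instead of
 * being split at the operator.
 */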
static struct strfilter_node *strfilter_node__alloc(const char *op,
struct strfilter_node *l,
struct strfilter_node *r)
{
struct strfilter_node *node = zalloc(sizeof(*node));
if (node) {
node->p = op;
node->l = l;
node->r = r;
}
return node;
}
static struct strfilter_node *strfilter_node__new(const char *s,
const char **ep)
{
struct strfilter_node root, *cur, *last_op;
const char *e;
if (!s)
return NULL;
memset(&root, 0, sizeof(root));
last_op = cur = &root;
s = get_token(s, &e);
while (*s != '\0' && *s != ')') {
switch (*s) {
case '&': /* Exchg last OP->r with AND */
if (!cur->r || !last_op->r)
goto error;
cur = strfilter_node__alloc(OP_and, last_op->r, NULL);
if (!cur)
goto nomem;
last_op->r = cur;
last_op = cur;
break;
case '|': /* Exchg the root with OR */
if (!cur->r || !root.r)
goto error;
cur = strfilter_node__alloc(OP_or, root.r, NULL);
if (!cur)
goto nomem;
root.r = cur;
last_op = cur;
break;
case '!': /* Add NOT as a leaf node */
if (cur->r)
goto error;
cur->r = strfilter_node__alloc(OP_not, NULL, NULL);
if (!cur->r)
goto nomem;
cur = cur->r;
break;
case '(': /* Recursively parses inside the parenthesis */
if (cur->r)
goto error;
cur->r = strfilter_node__new(s + 1, &s);
if (!s)
goto nomem;
if (!cur->r || *s != ')')
goto error;
e = s + 1;
break;
default:
if (cur->r)
goto error;
cur->r = strfilter_node__alloc(NULL, NULL, NULL);
if (!cur->r)
goto nomem;
cur->r->p = strndup(s, e - s);
if (!cur->r->p)
goto nomem;
}
s = get_token(e, &e);
}
if (!cur->r)
goto error;
*ep = s;
return root.r;
nomem:
s = NULL;
error:
*ep = s;
strfilter_node__delete(root.r);
return NULL;
}
/*
 * Parse a filter rule and return a new strfilter.
 * Returns NULL on failure; *err points at the parse error, or is set to
 * NULL if memory allocation failed.
*/
struct strfilter *strfilter__new(const char *rules, const char **err)
{
struct strfilter *filter = zalloc(sizeof(*filter));
const char *ep = NULL;
if (filter)
filter->root = strfilter_node__new(rules, &ep);
if (!filter || !filter->root || *ep != '\0') {
if (err)
*err = ep;
strfilter__delete(filter);
filter = NULL;
}
return filter;
}
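/*
 * Illustrative usage sketch (editor's addition): build a filter from glob
 * patterns combined with '&', '|', '!' and parentheses, then match strings
 * against it. With the rule below "vfs_read" matches but "vfs_read_raw"
 * does not; handle_match() stands in for caller code.
 *
 *	const char *err = NULL;
 *	struct strfilter *f = strfilter__new("(vfs_* | sys_*) & !*_raw", &err);
 *
 *	if (f && strfilter__compare(f, "vfs_read"))
 *		handle_match();
 *	strfilter__delete(f);
 */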
static int strfilter__append(struct strfilter *filter, bool _or,
const char *rules, const char **err)
{
struct strfilter_node *right, *root;
const char *ep = NULL;
if (!filter || !rules)
return -EINVAL;
right = strfilter_node__new(rules, &ep);
if (!right || *ep != '\0') {
if (err)
*err = ep;
goto error;
}
root = strfilter_node__alloc(_or ? OP_or : OP_and, filter->root, right);
if (!root) {
ep = NULL;
goto error;
}
filter->root = root;
return 0;
error:
strfilter_node__delete(right);
return ep ? -EINVAL : -ENOMEM;
}
int strfilter__or(struct strfilter *filter, const char *rules, const char **err)
{
return strfilter__append(filter, true, rules, err);
}
int strfilter__and(struct strfilter *filter, const char *rules,
const char **err)
{
return strfilter__append(filter, false, rules, err);
}
static bool strfilter_node__compare(struct strfilter_node *node,
const char *str)
{
if (!node || !node->p)
return false;
switch (*node->p) {
case '|': /* OR */
return strfilter_node__compare(node->l, str) ||
strfilter_node__compare(node->r, str);
case '&': /* AND */
return strfilter_node__compare(node->l, str) &&
strfilter_node__compare(node->r, str);
case '!': /* NOT */
return !strfilter_node__compare(node->r, str);
default:
return strglobmatch(str, node->p);
}
}
/* Return true if STR matches the filter rules */
bool strfilter__compare(struct strfilter *filter, const char *str)
{
if (!filter)
return false;
return strfilter_node__compare(filter->root, str);
}
static int strfilter_node__sprint(struct strfilter_node *node, char *buf);
/* sprint a node in parentheses if needed */
static int strfilter_node__sprint_pt(struct strfilter_node *node, char *buf)
{
int len;
int pt = node->r ? 2 : 0; /* don't need to check node->l */
if (buf && pt)
*buf++ = '(';
len = strfilter_node__sprint(node, buf);
if (len < 0)
return len;
if (buf && pt)
*(buf + len) = ')';
return len + pt;
}
static int strfilter_node__sprint(struct strfilter_node *node, char *buf)
{
int len = 0, rlen;
if (!node || !node->p)
return -EINVAL;
switch (*node->p) {
case '|':
case '&':
len = strfilter_node__sprint_pt(node->l, buf);
if (len < 0)
return len;
fallthrough;
case '!':
if (buf) {
*(buf + len++) = *node->p;
buf += len;
} else
len++;
rlen = strfilter_node__sprint_pt(node->r, buf);
if (rlen < 0)
return rlen;
len += rlen;
break;
default:
len = strlen(node->p);
if (buf)
strcpy(buf, node->p);
}
return len;
}
char *strfilter__string(struct strfilter *filter)
{
int len;
char *ret = NULL;
len = strfilter_node__sprint(filter->root, NULL);
if (len < 0)
return NULL;
	ret = zalloc(len + 1);	/* zero-fill guarantees NUL termination */
if (ret)
strfilter_node__sprint(filter->root, ret);
return ret;
}
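/*
 * Worked example (editor's addition): '&' binds tighter than '|', so
 * parsing "a|b&c" yields OR(a, AND(b, c)) and strfilter__string() prints
 * the normalized form "a|(b&c)", re-adding parentheses around any node
 * that has children.
 */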
| linux-master | tools/perf/util/strfilter.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <linux/zalloc.h>
#include "block-info.h"
#include "sort.h"
#include "annotate.h"
#include "symbol.h"
#include "dso.h"
#include "map.h"
#include "srcline.h"
#include "evlist.h"
#include "hist.h"
#include "ui/browsers/hists.h"
static struct block_header_column {
const char *name;
int width;
} block_columns[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = {
[PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT] = {
.name = "Sampled Cycles%",
.width = 15,
},
[PERF_HPP_REPORT__BLOCK_LBR_CYCLES] = {
.name = "Sampled Cycles",
.width = 14,
},
[PERF_HPP_REPORT__BLOCK_CYCLES_PCT] = {
.name = "Avg Cycles%",
.width = 11,
},
[PERF_HPP_REPORT__BLOCK_AVG_CYCLES] = {
.name = "Avg Cycles",
.width = 10,
},
[PERF_HPP_REPORT__BLOCK_RANGE] = {
.name = "[Program Block Range]",
.width = 70,
},
[PERF_HPP_REPORT__BLOCK_DSO] = {
.name = "Shared Object",
.width = 20,
}
};
struct block_info *block_info__get(struct block_info *bi)
{
if (bi)
refcount_inc(&bi->refcnt);
return bi;
}
void block_info__put(struct block_info *bi)
{
if (bi && refcount_dec_and_test(&bi->refcnt))
free(bi);
}
struct block_info *block_info__new(void)
{
struct block_info *bi = zalloc(sizeof(*bi));
if (bi)
refcount_set(&bi->refcnt, 1);
return bi;
}
int64_t __block_info__cmp(struct hist_entry *left, struct hist_entry *right)
{
struct block_info *bi_l = left->block_info;
struct block_info *bi_r = right->block_info;
int cmp;
if (!bi_l->sym || !bi_r->sym) {
if (!bi_l->sym && !bi_r->sym)
return -1;
else if (!bi_l->sym)
return -1;
else
return 1;
}
cmp = strcmp(bi_l->sym->name, bi_r->sym->name);
if (cmp)
return cmp;
if (bi_l->start != bi_r->start)
return (int64_t)(bi_r->start - bi_l->start);
return (int64_t)(bi_r->end - bi_l->end);
}
int64_t block_info__cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return __block_info__cmp(left, right);
}
static void init_block_info(struct block_info *bi, struct symbol *sym,
struct cyc_hist *ch, int offset,
u64 total_cycles)
{
bi->sym = sym;
bi->start = ch->start;
bi->end = offset;
bi->cycles = ch->cycles;
bi->cycles_aggr = ch->cycles_aggr;
bi->num = ch->num;
bi->num_aggr = ch->num_aggr;
bi->total_cycles = total_cycles;
memcpy(bi->cycles_spark, ch->cycles_spark,
NUM_SPARKS * sizeof(u64));
}
int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
u64 *block_cycles_aggr, u64 total_cycles)
{
struct annotation *notes;
struct cyc_hist *ch;
static struct addr_location al;
u64 cycles = 0;
if (!he->ms.map || !he->ms.sym)
return 0;
memset(&al, 0, sizeof(al));
al.map = he->ms.map;
al.sym = he->ms.sym;
notes = symbol__annotation(he->ms.sym);
if (!notes || !notes->src || !notes->src->cycles_hist)
return 0;
ch = notes->src->cycles_hist;
for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
if (ch[i].num_aggr) {
struct block_info *bi;
struct hist_entry *he_block;
bi = block_info__new();
if (!bi)
return -1;
init_block_info(bi, he->ms.sym, &ch[i], i,
total_cycles);
cycles += bi->cycles_aggr / bi->num_aggr;
he_block = hists__add_entry_block(&bh->block_hists,
&al, bi);
if (!he_block) {
block_info__put(bi);
return -1;
}
}
}
if (block_cycles_aggr)
*block_cycles_aggr += cycles;
return 0;
}
static int block_column_header(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp,
struct hists *hists __maybe_unused,
int line __maybe_unused,
int *span __maybe_unused)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
block_fmt->header);
}
static int block_column_width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct hists *hists __maybe_unused)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
return block_fmt->width;
}
static int color_pct(struct perf_hpp *hpp, int width, double pct)
{
#ifdef HAVE_SLANG_SUPPORT
if (use_browser) {
return __hpp__slsmg_color_printf(hpp, "%*.2f%%",
width - 1, pct);
}
#endif
return hpp_color_scnprintf(hpp, "%*.2f%%", width - 1, pct);
}
static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp,
struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi = he->block_info;
double ratio = 0.0;
if (block_fmt->total_cycles)
ratio = (double)bi->cycles_aggr / (double)block_fmt->total_cycles;
return color_pct(hpp, block_fmt->width, 100.0 * ratio);
}
static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt,
struct hist_entry *left,
struct hist_entry *right)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi_l = left->block_info;
struct block_info *bi_r = right->block_info;
double l, r;
if (block_fmt->total_cycles) {
l = ((double)bi_l->cycles_aggr /
(double)block_fmt->total_cycles) * 100000.0;
r = ((double)bi_r->cycles_aggr /
(double)block_fmt->total_cycles) * 100000.0;
return (int64_t)l - (int64_t)r;
}
return 0;
}
static void cycles_string(u64 cycles, char *buf, int size)
{
if (cycles >= 1000000)
scnprintf(buf, size, "%.1fM", (double)cycles / 1000000.0);
else if (cycles >= 1000)
scnprintf(buf, size, "%.1fK", (double)cycles / 1000.0);
else
scnprintf(buf, size, "%1d", cycles);
}
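/*
 * Worked example (editor's addition): 1234567 cycles is rendered as
 * "1.2M", 5300 as "5.3K" and 42 simply as "42".
 */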
static int block_cycles_lbr_entry(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi = he->block_info;
char cycles_buf[16];
cycles_string(bi->cycles_aggr, cycles_buf, sizeof(cycles_buf));
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
cycles_buf);
}
static int block_cycles_pct_entry(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi = he->block_info;
double ratio = 0.0;
u64 avg;
if (block_fmt->block_cycles && bi->num_aggr) {
avg = bi->cycles_aggr / bi->num_aggr;
ratio = (double)avg / (double)block_fmt->block_cycles;
}
return color_pct(hpp, block_fmt->width, 100.0 * ratio);
}
static int block_avg_cycles_entry(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp,
struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi = he->block_info;
char cycles_buf[16];
cycles_string(bi->cycles_aggr / bi->num_aggr, cycles_buf,
sizeof(cycles_buf));
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
cycles_buf);
}
static int block_range_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi = he->block_info;
char buf[128];
char *start_line, *end_line;
symbol_conf.disable_add2line_warn = true;
start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
he->ms.sym);
end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
he->ms.sym);
if (start_line != SRCLINE_UNKNOWN &&
end_line != SRCLINE_UNKNOWN) {
scnprintf(buf, sizeof(buf), "[%s -> %s]",
start_line, end_line);
} else {
scnprintf(buf, sizeof(buf), "[%7lx -> %7lx]",
bi->start, bi->end);
}
zfree_srcline(&start_line);
zfree_srcline(&end_line);
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
}
static int block_dso_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct map *map = he->ms.map;
if (map && map__dso(map)) {
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
map__dso(map)->short_name);
}
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
"[unknown]");
}
static void init_block_header(struct block_fmt *block_fmt)
{
struct perf_hpp_fmt *fmt = &block_fmt->fmt;
BUG_ON(block_fmt->idx >= PERF_HPP_REPORT__BLOCK_MAX_INDEX);
block_fmt->header = block_columns[block_fmt->idx].name;
block_fmt->width = block_columns[block_fmt->idx].width;
fmt->header = block_column_header;
fmt->width = block_column_width;
}
static void hpp_register(struct block_fmt *block_fmt, int idx,
struct perf_hpp_list *hpp_list)
{
struct perf_hpp_fmt *fmt = &block_fmt->fmt;
block_fmt->idx = idx;
INIT_LIST_HEAD(&fmt->list);
INIT_LIST_HEAD(&fmt->sort_list);
switch (idx) {
case PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT:
fmt->color = block_total_cycles_pct_entry;
fmt->cmp = block_info__cmp;
fmt->sort = block_total_cycles_pct_sort;
break;
case PERF_HPP_REPORT__BLOCK_LBR_CYCLES:
fmt->entry = block_cycles_lbr_entry;
break;
case PERF_HPP_REPORT__BLOCK_CYCLES_PCT:
fmt->color = block_cycles_pct_entry;
break;
case PERF_HPP_REPORT__BLOCK_AVG_CYCLES:
fmt->entry = block_avg_cycles_entry;
break;
case PERF_HPP_REPORT__BLOCK_RANGE:
fmt->entry = block_range_entry;
break;
case PERF_HPP_REPORT__BLOCK_DSO:
fmt->entry = block_dso_entry;
break;
default:
return;
}
init_block_header(block_fmt);
perf_hpp_list__column_register(hpp_list, fmt);
}
static void register_block_columns(struct perf_hpp_list *hpp_list,
struct block_fmt *block_fmts,
int *block_hpps, int nr_hpps)
{
for (int i = 0; i < nr_hpps; i++)
hpp_register(&block_fmts[i], block_hpps[i], hpp_list);
}
static void init_block_hist(struct block_hist *bh, struct block_fmt *block_fmts,
int *block_hpps, int nr_hpps)
{
__hists__init(&bh->block_hists, &bh->block_list);
perf_hpp_list__init(&bh->block_list);
bh->block_list.nr_header_lines = 1;
register_block_columns(&bh->block_list, block_fmts,
block_hpps, nr_hpps);
/* Sort by the first fmt */
perf_hpp_list__register_sort_field(&bh->block_list, &block_fmts[0].fmt);
}
static int process_block_report(struct hists *hists,
struct block_report *block_report,
u64 total_cycles, int *block_hpps,
int nr_hpps)
{
struct rb_node *next = rb_first_cached(&hists->entries);
struct block_hist *bh = &block_report->hist;
struct hist_entry *he;
if (nr_hpps > PERF_HPP_REPORT__BLOCK_MAX_INDEX)
return -1;
block_report->nr_fmts = nr_hpps;
init_block_hist(bh, block_report->fmts, block_hpps, nr_hpps);
while (next) {
he = rb_entry(next, struct hist_entry, rb_node);
block_info__process_sym(he, bh, &block_report->cycles,
total_cycles);
next = rb_next(&he->rb_node);
}
for (int i = 0; i < nr_hpps; i++) {
block_report->fmts[i].total_cycles = total_cycles;
block_report->fmts[i].block_cycles = block_report->cycles;
}
hists__output_resort(&bh->block_hists, NULL);
return 0;
}
struct block_report *block_info__create_report(struct evlist *evlist,
u64 total_cycles,
int *block_hpps, int nr_hpps,
int *nr_reps)
{
struct block_report *block_reports;
int nr_hists = evlist->core.nr_entries, i = 0;
struct evsel *pos;
block_reports = calloc(nr_hists, sizeof(struct block_report));
if (!block_reports)
return NULL;
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
process_block_report(hists, &block_reports[i], total_cycles,
block_hpps, nr_hpps);
i++;
}
*nr_reps = nr_hists;
return block_reports;
}
void block_info__free_report(struct block_report *reps, int nr_reps)
{
for (int i = 0; i < nr_reps; i++)
hists__delete_entries(&reps[i].hist.block_hists);
free(reps);
}
int report__browse_block_hists(struct block_hist *bh, float min_percent,
struct evsel *evsel, struct perf_env *env,
struct annotation_options *annotation_opts)
{
int ret;
switch (use_browser) {
case 0:
symbol_conf.report_individual_block = true;
hists__fprintf(&bh->block_hists, true, 0, 0, min_percent,
stdout, true);
return 0;
case 1:
symbol_conf.report_individual_block = true;
ret = block_hists_tui_browse(bh, evsel, min_percent,
env, annotation_opts);
return ret;
default:
return -1;
}
return 0;
}
float block_info__total_cycles_percent(struct hist_entry *he)
{
struct block_info *bi = he->block_info;
if (bi->total_cycles)
return bi->cycles * 100.0 / bi->total_cycles;
return 0.0;
}
| linux-master | tools/perf/util/block-info.c |
// SPDX-License-Identifier: GPL-2.0
static int find_map(void **start, void **end, const char *name)
{
FILE *maps;
char line[128];
int found = 0;
maps = fopen("/proc/self/maps", "r");
if (!maps) {
fprintf(stderr, "cannot open maps\n");
return -1;
}
while (!found && fgets(line, sizeof(line), maps)) {
int m = -1;
/* We care only about private r-x mappings. */
if (2 != sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n",
start, end, &m))
continue;
if (m < 0)
continue;
if (!strncmp(&line[m], name, strlen(name)))
found = 1;
}
fclose(maps);
return !found;
}
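/*
 * Illustrative usage sketch (editor's addition): look up an executable
 * mapping of the current process by name, e.g. the vDSO. Note the
 * inverted return convention: 0 means found.
 *
 *	void *start, *end;
 *
 *	if (!find_map(&start, &end, "[vdso]"))
 *		printf("vdso mapped at %p-%p\n", start, end);
 */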
| linux-master | tools/perf/util/find-map.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* probe-file.c : operate ftrace k/uprobe events files
*
* Written by Masami Hiramatsu <[email protected]>
*/
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/zalloc.h>
#include "namespaces.h"
#include "event.h"
#include "strlist.h"
#include "strfilter.h"
#include "debug.h"
#include "build-id.h"
#include "dso.h"
#include "color.h"
#include "symbol.h"
#include "strbuf.h"
#include <api/fs/tracing_path.h>
#include <api/fs/fs.h>
#include "probe-event.h"
#include "probe-file.h"
#include "session.h"
#include "perf_regs.h"
#include "string2.h"
/* 4096 - 2 ('\n' + '\0') */
#define MAX_CMDLEN 4094
static bool print_common_warning(int err, bool readwrite)
{
if (err == -EACCES)
pr_warning("No permission to %s tracefs.\nPlease %s\n",
readwrite ? "write" : "read",
readwrite ? "run this command again with sudo." :
"try 'sudo mount -o remount,mode=755 /sys/kernel/tracing/'");
else
return false;
return true;
}
static bool print_configure_probe_event(int kerr, int uerr)
{
const char *config, *file;
if (kerr == -ENOENT && uerr == -ENOENT) {
file = "{k,u}probe_events";
config = "CONFIG_KPROBE_EVENTS=y and CONFIG_UPROBE_EVENTS=y";
} else if (kerr == -ENOENT) {
file = "kprobe_events";
config = "CONFIG_KPROBE_EVENTS=y";
} else if (uerr == -ENOENT) {
file = "uprobe_events";
config = "CONFIG_UPROBE_EVENTS=y";
} else
return false;
if (!debugfs__configured() && !tracefs__configured())
pr_warning("Debugfs or tracefs is not mounted\n"
"Please try 'sudo mount -t tracefs nodev /sys/kernel/tracing/'\n");
else
pr_warning("%s/%s does not exist.\nPlease rebuild kernel with %s.\n",
tracing_path_mount(), file, config);
return true;
}
static void print_open_warning(int err, bool uprobe, bool readwrite)
{
char sbuf[STRERR_BUFSIZE];
if (print_common_warning(err, readwrite))
return;
if (print_configure_probe_event(uprobe ? 0 : err, uprobe ? err : 0))
return;
pr_warning("Failed to open %s/%cprobe_events: %s\n",
tracing_path_mount(), uprobe ? 'u' : 'k',
str_error_r(-err, sbuf, sizeof(sbuf)));
}
static void print_both_open_warning(int kerr, int uerr, bool readwrite)
{
char sbuf[STRERR_BUFSIZE];
if (kerr == uerr && print_common_warning(kerr, readwrite))
return;
if (print_configure_probe_event(kerr, uerr))
return;
if (kerr < 0)
pr_warning("Failed to open %s/kprobe_events: %s.\n",
tracing_path_mount(),
str_error_r(-kerr, sbuf, sizeof(sbuf)));
if (uerr < 0)
pr_warning("Failed to open %s/uprobe_events: %s.\n",
tracing_path_mount(),
str_error_r(-uerr, sbuf, sizeof(sbuf)));
}
int open_trace_file(const char *trace_file, bool readwrite)
{
char buf[PATH_MAX];
int ret;
ret = e_snprintf(buf, PATH_MAX, "%s/%s", tracing_path_mount(), trace_file);
if (ret >= 0) {
pr_debug("Opening %s write=%d\n", buf, readwrite);
if (readwrite && !probe_event_dry_run)
ret = open(buf, O_RDWR | O_APPEND, 0);
else
ret = open(buf, O_RDONLY, 0);
if (ret < 0)
ret = -errno;
}
return ret;
}
static int open_kprobe_events(bool readwrite)
{
return open_trace_file("kprobe_events", readwrite);
}
static int open_uprobe_events(bool readwrite)
{
return open_trace_file("uprobe_events", readwrite);
}
int probe_file__open(int flag)
{
int fd;
if (flag & PF_FL_UPROBE)
fd = open_uprobe_events(flag & PF_FL_RW);
else
fd = open_kprobe_events(flag & PF_FL_RW);
if (fd < 0)
print_open_warning(fd, flag & PF_FL_UPROBE, flag & PF_FL_RW);
return fd;
}
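/*
 * Illustrative usage sketch (editor's addition): open kprobe_events
 * read-write and list the current probe names; OR in PF_FL_UPROBE to
 * operate on uprobe_events instead.
 *
 *	int fd = probe_file__open(PF_FL_RW);
 *
 *	if (fd >= 0) {
 *		struct strlist *names = probe_file__get_namelist(fd);
 *
 *		... iterate names with strlist__for_each_entry() ...
 *		strlist__delete(names);
 *		close(fd);
 *	}
 */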
int probe_file__open_both(int *kfd, int *ufd, int flag)
{
if (!kfd || !ufd)
return -EINVAL;
*kfd = open_kprobe_events(flag & PF_FL_RW);
*ufd = open_uprobe_events(flag & PF_FL_RW);
if (*kfd < 0 && *ufd < 0) {
print_both_open_warning(*kfd, *ufd, flag & PF_FL_RW);
return *kfd;
}
return 0;
}
/* Get raw string list of current kprobe_events or uprobe_events */
struct strlist *probe_file__get_rawlist(int fd)
{
int ret, idx, fddup;
FILE *fp;
char buf[MAX_CMDLEN];
char *p;
struct strlist *sl;
if (fd < 0)
return NULL;
sl = strlist__new(NULL, NULL);
if (sl == NULL)
return NULL;
fddup = dup(fd);
if (fddup < 0)
goto out_free_sl;
fp = fdopen(fddup, "r");
if (!fp)
goto out_close_fddup;
while (!feof(fp)) {
p = fgets(buf, MAX_CMDLEN, fp);
if (!p)
break;
idx = strlen(p) - 1;
if (p[idx] == '\n')
p[idx] = '\0';
ret = strlist__add(sl, buf);
if (ret < 0) {
pr_debug("strlist__add failed (%d)\n", ret);
goto out_close_fp;
}
}
fclose(fp);
return sl;
out_close_fp:
fclose(fp);
goto out_free_sl;
out_close_fddup:
close(fddup);
out_free_sl:
strlist__delete(sl);
return NULL;
}
static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
{
char buf[128];
struct strlist *sl, *rawlist;
struct str_node *ent;
struct probe_trace_event tev;
int ret = 0;
memset(&tev, 0, sizeof(tev));
rawlist = probe_file__get_rawlist(fd);
if (!rawlist)
return NULL;
sl = strlist__new(NULL, NULL);
strlist__for_each_entry(ent, rawlist) {
ret = parse_probe_trace_command(ent->s, &tev);
if (ret < 0)
break;
if (include_group) {
ret = e_snprintf(buf, 128, "%s:%s", tev.group,
tev.event);
if (ret >= 0)
ret = strlist__add(sl, buf);
} else
ret = strlist__add(sl, tev.event);
clear_probe_trace_event(&tev);
		/* Skip if a same-name multi-probe event is already in the list */
if (ret == -EEXIST)
ret = 0;
if (ret < 0)
break;
}
strlist__delete(rawlist);
if (ret < 0) {
strlist__delete(sl);
return NULL;
}
return sl;
}
/* Get current perf-probe event names */
struct strlist *probe_file__get_namelist(int fd)
{
return __probe_file__get_namelist(fd, false);
}
int probe_file__add_event(int fd, struct probe_trace_event *tev)
{
int ret = 0;
char *buf = synthesize_probe_trace_command(tev);
char sbuf[STRERR_BUFSIZE];
if (!buf) {
pr_debug("Failed to synthesize probe trace event.\n");
return -EINVAL;
}
pr_debug("Writing event: %s\n", buf);
if (!probe_event_dry_run) {
if (write(fd, buf, strlen(buf)) < (int)strlen(buf)) {
ret = -errno;
pr_warning("Failed to write event: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
}
}
free(buf);
return ret;
}
static int __del_trace_probe_event(int fd, struct str_node *ent)
{
char *p;
char buf[128];
int ret;
/* Convert from perf-probe event to trace-probe event */
ret = e_snprintf(buf, 128, "-:%s", ent->s);
if (ret < 0)
goto error;
p = strchr(buf + 2, ':');
if (!p) {
pr_debug("Internal error: %s should have ':' but not.\n",
ent->s);
ret = -ENOTSUP;
goto error;
}
*p = '/';
pr_debug("Writing event: %s\n", buf);
ret = write(fd, buf, strlen(buf));
if (ret < 0) {
ret = -errno;
goto error;
}
return 0;
error:
pr_warning("Failed to delete event: %s\n",
str_error_r(-ret, buf, sizeof(buf)));
return ret;
}
int probe_file__get_events(int fd, struct strfilter *filter,
struct strlist *plist)
{
struct strlist *namelist;
struct str_node *ent;
const char *p;
int ret = -ENOENT;
if (!plist)
return -EINVAL;
namelist = __probe_file__get_namelist(fd, true);
if (!namelist)
return -ENOENT;
strlist__for_each_entry(ent, namelist) {
p = strchr(ent->s, ':');
if ((p && strfilter__compare(filter, p + 1)) ||
strfilter__compare(filter, ent->s)) {
ret = strlist__add(plist, ent->s);
if (ret == -ENOMEM) {
pr_err("strlist__add failed with -ENOMEM\n");
goto out;
}
ret = 0;
}
}
out:
strlist__delete(namelist);
return ret;
}
int probe_file__del_strlist(int fd, struct strlist *namelist)
{
int ret = 0;
struct str_node *ent;
strlist__for_each_entry(ent, namelist) {
ret = __del_trace_probe_event(fd, ent);
if (ret < 0)
break;
}
return ret;
}
int probe_file__del_events(int fd, struct strfilter *filter)
{
struct strlist *namelist;
int ret;
namelist = strlist__new(NULL, NULL);
if (!namelist)
return -ENOMEM;
ret = probe_file__get_events(fd, filter, namelist);
if (ret < 0)
goto out;
ret = probe_file__del_strlist(fd, namelist);
out:
strlist__delete(namelist);
return ret;
}
/* Caller must ensure to remove this entry from list */
static void probe_cache_entry__delete(struct probe_cache_entry *entry)
{
if (entry) {
BUG_ON(!list_empty(&entry->node));
strlist__delete(entry->tevlist);
clear_perf_probe_event(&entry->pev);
zfree(&entry->spev);
free(entry);
}
}
static struct probe_cache_entry *
probe_cache_entry__new(struct perf_probe_event *pev)
{
struct probe_cache_entry *entry = zalloc(sizeof(*entry));
if (entry) {
INIT_LIST_HEAD(&entry->node);
entry->tevlist = strlist__new(NULL, NULL);
if (!entry->tevlist)
zfree(&entry);
else if (pev) {
entry->spev = synthesize_perf_probe_command(pev);
if (!entry->spev ||
perf_probe_event__copy(&entry->pev, pev) < 0) {
probe_cache_entry__delete(entry);
return NULL;
}
}
}
return entry;
}
int probe_cache_entry__get_event(struct probe_cache_entry *entry,
struct probe_trace_event **tevs)
{
struct probe_trace_event *tev;
struct str_node *node;
int ret, i;
ret = strlist__nr_entries(entry->tevlist);
if (ret > probe_conf.max_probes)
return -E2BIG;
*tevs = zalloc(ret * sizeof(*tev));
if (!*tevs)
return -ENOMEM;
i = 0;
strlist__for_each_entry(node, entry->tevlist) {
tev = &(*tevs)[i++];
ret = parse_probe_trace_command(node->s, tev);
if (ret < 0)
break;
}
return i;
}
/* For the kernel probe caches, pass target = NULL or DSO__NAME_KALLSYMS */
static int probe_cache__open(struct probe_cache *pcache, const char *target,
struct nsinfo *nsi)
{
char cpath[PATH_MAX];
char sbuildid[SBUILD_ID_SIZE];
char *dir_name = NULL;
bool is_kallsyms = false;
int ret, fd;
struct nscookie nsc;
if (target && build_id_cache__cached(target)) {
/* This is a cached buildid */
strlcpy(sbuildid, target, SBUILD_ID_SIZE);
dir_name = build_id_cache__linkname(sbuildid, NULL, 0);
goto found;
}
if (!target || !strcmp(target, DSO__NAME_KALLSYMS)) {
target = DSO__NAME_KALLSYMS;
is_kallsyms = true;
ret = sysfs__sprintf_build_id("/", sbuildid);
} else {
nsinfo__mountns_enter(nsi, &nsc);
ret = filename__sprintf_build_id(target, sbuildid);
nsinfo__mountns_exit(&nsc);
}
if (ret < 0) {
pr_debug("Failed to get build-id from %s.\n", target);
return ret;
}
/* If we have no buildid cache, make it */
if (!build_id_cache__cached(sbuildid)) {
ret = build_id_cache__add_s(sbuildid, target, nsi,
is_kallsyms, NULL);
if (ret < 0) {
pr_debug("Failed to add build-id cache: %s\n", target);
return ret;
}
}
dir_name = build_id_cache__cachedir(sbuildid, target, nsi, is_kallsyms,
false);
found:
if (!dir_name) {
pr_debug("Failed to get cache from %s\n", target);
return -ENOMEM;
}
snprintf(cpath, PATH_MAX, "%s/probes", dir_name);
fd = open(cpath, O_CREAT | O_RDWR, 0644);
if (fd < 0)
pr_debug("Failed to open cache(%d): %s\n", fd, cpath);
free(dir_name);
pcache->fd = fd;
return fd;
}
static int probe_cache__load(struct probe_cache *pcache)
{
struct probe_cache_entry *entry = NULL;
char buf[MAX_CMDLEN], *p;
int ret = 0, fddup;
FILE *fp;
fddup = dup(pcache->fd);
if (fddup < 0)
return -errno;
fp = fdopen(fddup, "r");
if (!fp) {
close(fddup);
return -EINVAL;
}
while (!feof(fp)) {
if (!fgets(buf, MAX_CMDLEN, fp))
break;
p = strchr(buf, '\n');
if (p)
*p = '\0';
/* #perf_probe_event or %sdt_event */
if (buf[0] == '#' || buf[0] == '%') {
entry = probe_cache_entry__new(NULL);
if (!entry) {
ret = -ENOMEM;
goto out;
}
if (buf[0] == '%')
entry->sdt = true;
entry->spev = strdup(buf + 1);
if (entry->spev)
ret = parse_perf_probe_command(buf + 1,
&entry->pev);
else
ret = -ENOMEM;
if (ret < 0) {
probe_cache_entry__delete(entry);
goto out;
}
list_add_tail(&entry->node, &pcache->entries);
} else { /* trace_probe_event */
if (!entry) {
ret = -EINVAL;
goto out;
}
ret = strlist__add(entry->tevlist, buf);
if (ret == -ENOMEM) {
pr_err("strlist__add failed with -ENOMEM\n");
goto out;
}
}
}
out:
fclose(fp);
return ret;
}
static struct probe_cache *probe_cache__alloc(void)
{
struct probe_cache *pcache = zalloc(sizeof(*pcache));
if (pcache) {
INIT_LIST_HEAD(&pcache->entries);
pcache->fd = -EINVAL;
}
return pcache;
}
void probe_cache__purge(struct probe_cache *pcache)
{
struct probe_cache_entry *entry, *n;
list_for_each_entry_safe(entry, n, &pcache->entries, node) {
list_del_init(&entry->node);
probe_cache_entry__delete(entry);
}
}
void probe_cache__delete(struct probe_cache *pcache)
{
if (!pcache)
return;
probe_cache__purge(pcache);
if (pcache->fd > 0)
close(pcache->fd);
free(pcache);
}
struct probe_cache *probe_cache__new(const char *target, struct nsinfo *nsi)
{
struct probe_cache *pcache = probe_cache__alloc();
int ret;
if (!pcache)
return NULL;
ret = probe_cache__open(pcache, target, nsi);
if (ret < 0) {
pr_debug("Cache open error: %d\n", ret);
goto out_err;
}
ret = probe_cache__load(pcache);
if (ret < 0) {
pr_debug("Cache read error: %d\n", ret);
goto out_err;
}
return pcache;
out_err:
probe_cache__delete(pcache);
return NULL;
}
static bool streql(const char *a, const char *b)
{
if (a == b)
return true;
if (!a || !b)
return false;
return !strcmp(a, b);
}
struct probe_cache_entry *
probe_cache__find(struct probe_cache *pcache, struct perf_probe_event *pev)
{
struct probe_cache_entry *entry = NULL;
char *cmd = synthesize_perf_probe_command(pev);
if (!cmd)
return NULL;
for_each_probe_cache_entry(entry, pcache) {
if (pev->sdt) {
if (entry->pev.event &&
streql(entry->pev.event, pev->event) &&
(!pev->group ||
streql(entry->pev.group, pev->group)))
goto found;
continue;
}
/* Hit if same event name or same command-string */
if ((pev->event &&
(streql(entry->pev.group, pev->group) &&
streql(entry->pev.event, pev->event))) ||
(!strcmp(entry->spev, cmd)))
goto found;
}
entry = NULL;
found:
free(cmd);
return entry;
}
struct probe_cache_entry *
probe_cache__find_by_name(struct probe_cache *pcache,
const char *group, const char *event)
{
struct probe_cache_entry *entry = NULL;
for_each_probe_cache_entry(entry, pcache) {
		/* Hit if both the group and the event name match */
if (streql(entry->pev.group, group) &&
streql(entry->pev.event, event))
goto found;
}
entry = NULL;
found:
return entry;
}
int probe_cache__add_entry(struct probe_cache *pcache,
struct perf_probe_event *pev,
struct probe_trace_event *tevs, int ntevs)
{
struct probe_cache_entry *entry = NULL;
char *command;
int i, ret = 0;
if (!pcache || !pev || !tevs || ntevs <= 0) {
ret = -EINVAL;
goto out_err;
}
/* Remove old cache entry */
entry = probe_cache__find(pcache, pev);
if (entry) {
list_del_init(&entry->node);
probe_cache_entry__delete(entry);
}
ret = -ENOMEM;
entry = probe_cache_entry__new(pev);
if (!entry)
goto out_err;
for (i = 0; i < ntevs; i++) {
if (!tevs[i].point.symbol)
continue;
command = synthesize_probe_trace_command(&tevs[i]);
if (!command)
goto out_err;
ret = strlist__add(entry->tevlist, command);
if (ret == -ENOMEM) {
pr_err("strlist__add failed with -ENOMEM\n");
goto out_err;
}
free(command);
}
list_add_tail(&entry->node, &pcache->entries);
pr_debug("Added probe cache: %d\n", ntevs);
return 0;
out_err:
pr_debug("Failed to add probe caches\n");
probe_cache_entry__delete(entry);
return ret;
}
#ifdef HAVE_GELF_GETNOTE_SUPPORT
static unsigned long long sdt_note__get_addr(struct sdt_note *note)
{
return note->bit32 ?
(unsigned long long)note->addr.a32[SDT_NOTE_IDX_LOC] :
(unsigned long long)note->addr.a64[SDT_NOTE_IDX_LOC];
}
static unsigned long long sdt_note__get_ref_ctr_offset(struct sdt_note *note)
{
return note->bit32 ?
(unsigned long long)note->addr.a32[SDT_NOTE_IDX_REFCTR] :
(unsigned long long)note->addr.a64[SDT_NOTE_IDX_REFCTR];
}
static const char * const type_to_suffix[] = {
":s64", "", "", "", ":s32", "", ":s16", ":s8",
"", ":u8", ":u16", "", ":u32", "", "", "", ":u64"
};
/*
 * Parse the size prefix of an SDT argument and convert it to a decimal
 * value; it is then used as an index into type_to_suffix[] to pick the
 * type suffix appended to the uprobe argument.
 */
static int sdt_arg_parse_size(char *n_ptr, const char **suffix)
{
long type_idx;
type_idx = strtol(n_ptr, NULL, 10);
if (type_idx < -8 || type_idx > 8) {
pr_debug4("Failed to get a valid sdt type\n");
return -1;
}
*suffix = type_to_suffix[type_idx + 8];
return 0;
}
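/*
 * Worked example (editor's addition): the prefix indexes type_to_suffix[]
 * at type_idx + 8, so "-8" maps to ":s64", "-4" to ":s32", "4" to ":u32"
 * and "8" to ":u64"; sizes without a dedicated suffix (e.g. 0) yield an
 * empty string.
 */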
static int synthesize_sdt_probe_arg(struct strbuf *buf, int i, const char *arg)
{
char *op, *desc = strdup(arg), *new_op = NULL;
const char *suffix = "";
int ret = -1;
if (desc == NULL) {
pr_debug4("Allocation error\n");
return ret;
}
	/*
	 * The argument is in N@OP format. N is the size of the argument and
	 * OP is the actual assembly operand. N may be omitted, in which case
	 * the argument is just OP (without the '@').
	 */
op = strchr(desc, '@');
if (op) {
op[0] = '\0';
op++;
if (sdt_arg_parse_size(desc, &suffix))
goto error;
} else {
op = desc;
}
ret = arch_sdt_arg_parse_op(op, &new_op);
if (ret < 0)
goto error;
if (ret == SDT_ARG_VALID) {
ret = strbuf_addf(buf, " arg%d=%s%s", i + 1, new_op, suffix);
if (ret < 0)
goto error;
}
ret = 0;
error:
free(desc);
free(new_op);
return ret;
}
static char *synthesize_sdt_probe_command(struct sdt_note *note,
const char *pathname,
const char *sdtgrp)
{
struct strbuf buf;
char *ret = NULL;
int i, args_count, err;
unsigned long long ref_ctr_offset;
char *arg;
int arg_idx = 0;
if (strbuf_init(&buf, 32) < 0)
return NULL;
err = strbuf_addf(&buf, "p:%s/%s %s:0x%llx",
sdtgrp, note->name, pathname,
sdt_note__get_addr(note));
ref_ctr_offset = sdt_note__get_ref_ctr_offset(note);
if (ref_ctr_offset && err >= 0)
err = strbuf_addf(&buf, "(0x%llx)", ref_ctr_offset);
if (err < 0)
goto error;
if (!note->args)
goto out;
if (note->args) {
char **args = argv_split(note->args, &args_count);
if (args == NULL)
goto error;
for (i = 0; i < args_count; ) {
			/*
			 * FIXUP: the Arm64 ELF section '.note.stapsdt' uses
			 * the string format "-4@[sp, NUM]" when a probe
			 * accesses data on the stack, e.g. an SDT arguments
			 * line may read:
			 *
			 *   Arguments: -4@[sp, 12] -4@[sp, 8] -4@[sp, 4]
			 *
			 * Because of the space inside the square brackets,
			 * argv_split() breaks each such argument into two
			 * items. To fix this up, whenever an item contains
			 * the substring "[sp,", concatenate it with the
			 * following item.
			 */
if (strstr(args[i], "[sp,") && (i+1) < args_count) {
err = asprintf(&arg, "%s %s", args[i], args[i+1]);
i += 2;
} else {
err = asprintf(&arg, "%s", args[i]);
i += 1;
}
/* Failed to allocate memory */
if (err < 0) {
argv_free(args);
goto error;
}
if (synthesize_sdt_probe_arg(&buf, arg_idx, arg) < 0) {
free(arg);
argv_free(args);
goto error;
}
free(arg);
arg_idx++;
}
argv_free(args);
}
out:
ret = strbuf_detach(&buf, NULL);
error:
strbuf_release(&buf);
return ret;
}
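/*
 * Illustrative example (editor's addition, with made-up numbers): an SDT
 * note "setjmp" from provider "libc" at offset 0x35344 in
 * /usr/lib64/libc.so.6 with one "8@OP" argument synthesizes roughly:
 *
 *	p:sdt_libc/setjmp /usr/lib64/libc.so.6:0x35344 arg1=<op>:u64
 *
 * where <op> is the operand as rewritten by arch_sdt_arg_parse_op(), plus
 * a "(0x...)" suffix after the address when the note carries a
 * ref_ctr_offset.
 */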
int probe_cache__scan_sdt(struct probe_cache *pcache, const char *pathname)
{
struct probe_cache_entry *entry = NULL;
struct list_head sdtlist;
struct sdt_note *note;
char *buf;
char sdtgrp[64];
int ret;
INIT_LIST_HEAD(&sdtlist);
ret = get_sdt_note_list(&sdtlist, pathname);
if (ret < 0) {
pr_debug4("Failed to get sdt note: %d\n", ret);
return ret;
}
list_for_each_entry(note, &sdtlist, note_list) {
ret = snprintf(sdtgrp, 64, "sdt_%s", note->provider);
if (ret < 0)
break;
/* Try to find same-name entry */
entry = probe_cache__find_by_name(pcache, sdtgrp, note->name);
if (!entry) {
entry = probe_cache_entry__new(NULL);
if (!entry) {
ret = -ENOMEM;
break;
}
entry->sdt = true;
ret = asprintf(&entry->spev, "%s:%s=%s", sdtgrp,
note->name, note->name);
if (ret < 0)
break;
entry->pev.event = strdup(note->name);
entry->pev.group = strdup(sdtgrp);
list_add_tail(&entry->node, &pcache->entries);
}
buf = synthesize_sdt_probe_command(note, pathname, sdtgrp);
if (!buf) {
ret = -ENOMEM;
break;
}
ret = strlist__add(entry->tevlist, buf);
free(buf);
entry = NULL;
if (ret == -ENOMEM) {
pr_err("strlist__add failed with -ENOMEM\n");
break;
}
}
if (entry) {
list_del_init(&entry->node);
probe_cache_entry__delete(entry);
}
cleanup_sdt_note_list(&sdtlist);
return ret;
}
#endif
static int probe_cache_entry__write(struct probe_cache_entry *entry, int fd)
{
struct str_node *snode;
struct stat st;
struct iovec iov[3];
const char *prefix = entry->sdt ? "%" : "#";
int ret;
/* Save stat for rollback */
ret = fstat(fd, &st);
if (ret < 0)
return ret;
pr_debug("Writing cache: %s%s\n", prefix, entry->spev);
iov[0].iov_base = (void *)prefix; iov[0].iov_len = 1;
iov[1].iov_base = entry->spev; iov[1].iov_len = strlen(entry->spev);
iov[2].iov_base = (void *)"\n"; iov[2].iov_len = 1;
ret = writev(fd, iov, 3);
if (ret < (int)iov[1].iov_len + 2)
goto rollback;
strlist__for_each_entry(snode, entry->tevlist) {
iov[0].iov_base = (void *)snode->s;
iov[0].iov_len = strlen(snode->s);
iov[1].iov_base = (void *)"\n"; iov[1].iov_len = 1;
ret = writev(fd, iov, 2);
if (ret < (int)iov[0].iov_len + 1)
goto rollback;
}
return 0;
rollback:
/* Rollback to avoid cache file corruption */
if (ret > 0)
ret = -1;
if (ftruncate(fd, st.st_size) < 0)
ret = -2;
return ret;
}
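/*
 * Illustrative cache-file layout (editor's addition): each entry is one
 * '#' (perf-probe) or '%' (SDT) header line followed by its synthesized
 * trace commands, e.g.:
 *
 *	#vfs_read=vfs_read
 *	p:probe/vfs_read _text+2621840
 *	%sdt_libc:setjmp=setjmp
 *	p:sdt_libc/setjmp /usr/lib64/libc.so.6:0x35344
 *
 * On a short write the file is truncated back to its saved size, so a
 * partial entry never corrupts the cache.
 */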
int probe_cache__commit(struct probe_cache *pcache)
{
struct probe_cache_entry *entry;
int ret = 0;
/* TBD: if we do not update existing entries, skip it */
ret = lseek(pcache->fd, 0, SEEK_SET);
if (ret < 0)
goto out;
ret = ftruncate(pcache->fd, 0);
if (ret < 0)
goto out;
for_each_probe_cache_entry(entry, pcache) {
ret = probe_cache_entry__write(entry, pcache->fd);
pr_debug("Cache committed: %d\n", ret);
if (ret < 0)
break;
}
out:
return ret;
}
static bool probe_cache_entry__compare(struct probe_cache_entry *entry,
struct strfilter *filter)
{
char buf[128], *ptr = entry->spev;
if (entry->pev.event) {
snprintf(buf, 128, "%s:%s", entry->pev.group, entry->pev.event);
ptr = buf;
}
return strfilter__compare(filter, ptr);
}
int probe_cache__filter_purge(struct probe_cache *pcache,
struct strfilter *filter)
{
struct probe_cache_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &pcache->entries, node) {
if (probe_cache_entry__compare(entry, filter)) {
pr_info("Removed cached event: %s\n", entry->spev);
list_del_init(&entry->node);
probe_cache_entry__delete(entry);
}
}
return 0;
}
static int probe_cache__show_entries(struct probe_cache *pcache,
struct strfilter *filter)
{
struct probe_cache_entry *entry;
for_each_probe_cache_entry(entry, pcache) {
if (probe_cache_entry__compare(entry, filter))
printf("%s\n", entry->spev);
}
return 0;
}
/* Show all cached probes */
int probe_cache__show_all_caches(struct strfilter *filter)
{
struct probe_cache *pcache;
struct strlist *bidlist;
struct str_node *nd;
char *buf = strfilter__string(filter);
pr_debug("list cache with filter: %s\n", buf);
free(buf);
bidlist = build_id_cache__list_all(true);
if (!bidlist) {
pr_debug("Failed to get buildids: %d\n", errno);
return -EINVAL;
}
strlist__for_each_entry(nd, bidlist) {
pcache = probe_cache__new(nd->s, NULL);
if (!pcache)
continue;
if (!list_empty(&pcache->entries)) {
buf = build_id_cache__origname(nd->s);
printf("%s (%s):\n", buf, nd->s);
free(buf);
probe_cache__show_entries(pcache, filter);
}
probe_cache__delete(pcache);
}
strlist__delete(bidlist);
return 0;
}
enum ftrace_readme {
FTRACE_README_PROBE_TYPE_X = 0,
FTRACE_README_KRETPROBE_OFFSET,
FTRACE_README_UPROBE_REF_CTR,
FTRACE_README_USER_ACCESS,
FTRACE_README_MULTIPROBE_EVENT,
FTRACE_README_IMMEDIATE_VALUE,
FTRACE_README_END,
};
static struct {
const char *pattern;
bool avail;
} ftrace_readme_table[] = {
#define DEFINE_TYPE(idx, pat) \
[idx] = {.pattern = pat, .avail = false}
DEFINE_TYPE(FTRACE_README_PROBE_TYPE_X, "*type: * x8/16/32/64,*"),
DEFINE_TYPE(FTRACE_README_KRETPROBE_OFFSET, "*place (kretprobe): *"),
DEFINE_TYPE(FTRACE_README_UPROBE_REF_CTR, "*ref_ctr_offset*"),
DEFINE_TYPE(FTRACE_README_USER_ACCESS, "*u]<offset>*"),
DEFINE_TYPE(FTRACE_README_MULTIPROBE_EVENT, "*Create/append/*"),
DEFINE_TYPE(FTRACE_README_IMMEDIATE_VALUE, "*\\imm-value,*"),
};
static bool scan_ftrace_readme(enum ftrace_readme type)
{
int fd;
FILE *fp;
char *buf = NULL;
size_t len = 0;
bool ret = false;
static bool scanned = false;
if (scanned)
goto result;
fd = open_trace_file("README", false);
if (fd < 0)
return ret;
fp = fdopen(fd, "r");
if (!fp) {
close(fd);
return ret;
}
while (getline(&buf, &len, fp) > 0)
for (enum ftrace_readme i = 0; i < FTRACE_README_END; i++)
if (!ftrace_readme_table[i].avail)
ftrace_readme_table[i].avail =
strglobmatch(buf, ftrace_readme_table[i].pattern);
scanned = true;
fclose(fp);
free(buf);
result:
if (type >= FTRACE_README_END)
return false;
return ftrace_readme_table[type].avail;
}
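/*
 * Illustrative example (editor's addition): a tracefs README line such as
 *
 *	type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string
 *
 * matches the FTRACE_README_PROBE_TYPE_X glob above, so
 * probe_type_is_available(PROBE_TYPE_X) reports true. The README is read
 * only once; results are memoized in ftrace_readme_table[].
 */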
bool probe_type_is_available(enum probe_type type)
{
if (type >= PROBE_TYPE_END)
return false;
else if (type == PROBE_TYPE_X)
return scan_ftrace_readme(FTRACE_README_PROBE_TYPE_X);
return true;
}
bool kretprobe_offset_is_supported(void)
{
return scan_ftrace_readme(FTRACE_README_KRETPROBE_OFFSET);
}
bool uprobe_ref_ctr_is_supported(void)
{
return scan_ftrace_readme(FTRACE_README_UPROBE_REF_CTR);
}
bool user_access_is_supported(void)
{
return scan_ftrace_readme(FTRACE_README_USER_ACCESS);
}
bool multiprobe_event_is_supported(void)
{
return scan_ftrace_readme(FTRACE_README_MULTIPROBE_EVENT);
}
bool immediate_value_is_supported(void)
{
return scan_ftrace_readme(FTRACE_README_IMMEDIATE_VALUE);
}
| linux-master | tools/perf/util/probe-file.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* auxtrace.c: AUX area trace support
* Copyright (c) 2013-2015, Intel Corporation.
*/
#include <inttypes.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#include "config.h"
#include "evlist.h"
#include "dso.h"
#include "map.h"
#include "pmu.h"
#include "evsel.h"
#include "evsel_config.h"
#include "symbol.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include <linux/hash.h>
#include "event.h"
#include "record.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>
#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
#include "arm-spe.h"
#include "hisi-ptt.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"
#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>
#include "util/sample.h"
/*
* Make a group from 'leader' to 'last', requiring that the events were not
* already grouped to a different leader.
*/
static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last)
{
struct evsel *evsel;
bool grp;
if (!evsel__is_group_leader(leader))
return -EINVAL;
grp = false;
evlist__for_each_entry(evlist, evsel) {
if (grp) {
if (!(evsel__leader(evsel) == leader ||
(evsel__leader(evsel) == evsel &&
evsel->core.nr_members <= 1)))
return -EINVAL;
} else if (evsel == leader) {
grp = true;
}
if (evsel == last)
break;
}
grp = false;
evlist__for_each_entry(evlist, evsel) {
if (grp) {
if (!evsel__has_leader(evsel, leader)) {
evsel__set_leader(evsel, leader);
if (leader->core.nr_members < 1)
leader->core.nr_members = 1;
leader->core.nr_members += 1;
}
} else if (evsel == leader) {
grp = true;
}
if (evsel == last)
break;
}
return 0;
}
static bool auxtrace__dont_decode(struct perf_session *session)
{
return !session->itrace_synth_opts ||
session->itrace_synth_opts->dont_decode;
}
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
struct auxtrace_mmap_params *mp,
void *userpg, int fd)
{
struct perf_event_mmap_page *pc = userpg;
WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
mm->userpg = userpg;
mm->mask = mp->mask;
mm->len = mp->len;
mm->prev = 0;
mm->idx = mp->idx;
mm->tid = mp->tid;
mm->cpu = mp->cpu.cpu;
if (!mp->len || !mp->mmap_needed) {
mm->base = NULL;
return 0;
}
pc->aux_offset = mp->offset;
pc->aux_size = mp->len;
mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
if (mm->base == MAP_FAILED) {
pr_debug2("failed to mmap AUX area\n");
mm->base = NULL;
return -1;
}
return 0;
}
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
if (mm->base) {
munmap(mm->base, mm->len);
mm->base = NULL;
}
}
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
off_t auxtrace_offset,
unsigned int auxtrace_pages,
bool auxtrace_overwrite)
{
if (auxtrace_pages) {
mp->offset = auxtrace_offset;
mp->len = auxtrace_pages * (size_t)page_size;
mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
pr_debug2("AUX area mmap length %zu\n", mp->len);
} else {
mp->len = 0;
}
}
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
struct evlist *evlist,
struct evsel *evsel, int idx)
{
bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
mp->mmap_needed = evsel->needs_auxtrace_mmap;
if (!mp->mmap_needed)
return;
mp->idx = idx;
if (per_cpu) {
mp->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
if (evlist->core.threads)
mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
else
mp->tid = -1;
} else {
mp->cpu.cpu = -1;
mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
}
}
#define AUXTRACE_INIT_NR_QUEUES 32
static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
struct auxtrace_queue *queue_array;
unsigned int max_nr_queues, i;
max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
if (nr_queues > max_nr_queues)
return NULL;
queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
if (!queue_array)
return NULL;
for (i = 0; i < nr_queues; i++) {
INIT_LIST_HEAD(&queue_array[i].head);
queue_array[i].priv = NULL;
}
return queue_array;
}
int auxtrace_queues__init(struct auxtrace_queues *queues)
{
queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
if (!queues->queue_array)
return -ENOMEM;
return 0;
}
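/*
 * Grow the queue array by doubling its size until it covers new_nr_queues,
 * splicing each existing queue's buffer list into the new array.
 */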
static int auxtrace_queues__grow(struct auxtrace_queues *queues,
unsigned int new_nr_queues)
{
unsigned int nr_queues = queues->nr_queues;
struct auxtrace_queue *queue_array;
unsigned int i;
if (!nr_queues)
nr_queues = AUXTRACE_INIT_NR_QUEUES;
while (nr_queues && nr_queues < new_nr_queues)
nr_queues <<= 1;
if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
return -EINVAL;
queue_array = auxtrace_alloc_queue_array(nr_queues);
if (!queue_array)
return -ENOMEM;
for (i = 0; i < queues->nr_queues; i++) {
list_splice_tail(&queues->queue_array[i].head,
&queue_array[i].head);
queue_array[i].tid = queues->queue_array[i].tid;
queue_array[i].cpu = queues->queue_array[i].cpu;
queue_array[i].set = queues->queue_array[i].set;
queue_array[i].priv = queues->queue_array[i].priv;
}
queues->nr_queues = nr_queues;
queues->queue_array = queue_array;
return 0;
}
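/*
 * Copy 'size' bytes from the current position of the session data file
 * into a newly allocated buffer; used when reading from a pipe, where the
 * data cannot be mapped later.
 */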
static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
int fd = perf_data__fd(session->data);
void *p;
ssize_t ret;
if (size > SSIZE_MAX)
return NULL;
p = malloc(size);
if (!p)
return NULL;
ret = readn(fd, p, size);
if (ret != (ssize_t)size) {
free(p);
return NULL;
}
return p;
}
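/*
 * Add a buffer to the tail of queue 'idx', growing the queue array if
 * needed and binding the queue to the buffer's tid/cpu on first use.
 */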
static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
unsigned int idx,
struct auxtrace_buffer *buffer)
{
struct auxtrace_queue *queue;
int err;
if (idx >= queues->nr_queues) {
err = auxtrace_queues__grow(queues, idx + 1);
if (err)
return err;
}
queue = &queues->queue_array[idx];
if (!queue->set) {
queue->set = true;
queue->tid = buffer->tid;
queue->cpu = buffer->cpu.cpu;
}
buffer->buffer_nr = queues->next_buffer_nr++;
list_add_tail(&buffer->list, &queue->head);
queues->new_data = true;
queues->populated = true;
return 0;
}
/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
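/*
 * Split a buffer that is too big to map on 32-bit into pieces of at most
 * BUFFER_LIMIT_FOR_32_BIT: each full-size piece is queued here, while the
 * remaining tail is left in 'buffer' for the caller to queue, with all
 * pieces after the first marked consecutive.
 */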
static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
unsigned int idx,
struct auxtrace_buffer *buffer)
{
u64 sz = buffer->size;
bool consecutive = false;
struct auxtrace_buffer *b;
int err;
while (sz > BUFFER_LIMIT_FOR_32_BIT) {
b = memdup(buffer, sizeof(struct auxtrace_buffer));
if (!b)
return -ENOMEM;
b->size = BUFFER_LIMIT_FOR_32_BIT;
b->consecutive = consecutive;
err = auxtrace_queues__queue_buffer(queues, idx, b);
if (err) {
auxtrace_buffer__free(b);
return err;
}
buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
sz -= BUFFER_LIMIT_FOR_32_BIT;
consecutive = true;
}
buffer->size = sz;
buffer->consecutive = consecutive;
return 0;
}
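/* Return true if this CPU's data is excluded by the itrace cpu bitmap */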
static bool filter_cpu(struct perf_session *session, struct perf_cpu cpu)
{
unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
return cpu_bitmap && cpu.cpu != -1 && !test_bit(cpu.cpu, cpu_bitmap);
}
static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
struct perf_session *session,
unsigned int idx,
struct auxtrace_buffer *buffer,
struct auxtrace_buffer **buffer_ptr)
{
int err = -ENOMEM;
if (filter_cpu(session, buffer->cpu))
return 0;
buffer = memdup(buffer, sizeof(*buffer));
if (!buffer)
return -ENOMEM;
if (session->one_mmap) {
buffer->data = buffer->data_offset - session->one_mmap_offset +
session->one_mmap_addr;
} else if (perf_data__is_pipe(session->data)) {
buffer->data = auxtrace_copy_data(buffer->size, session);
if (!buffer->data)
goto out_free;
buffer->data_needs_freeing = true;
} else if (BITS_PER_LONG == 32 &&
buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
err = auxtrace_queues__split_buffer(queues, idx, buffer);
if (err)
goto out_free;
}
err = auxtrace_queues__queue_buffer(queues, idx, buffer);
if (err)
goto out_free;
/* FIXME: Doesn't work for split buffer */
if (buffer_ptr)
*buffer_ptr = buffer;
return 0;
out_free:
auxtrace_buffer__free(buffer);
return err;
}
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
struct perf_session *session,
union perf_event *event, off_t data_offset,
struct auxtrace_buffer **buffer_ptr)
{
struct auxtrace_buffer buffer = {
.pid = -1,
.tid = event->auxtrace.tid,
.cpu = { event->auxtrace.cpu },
.data_offset = data_offset,
.offset = event->auxtrace.offset,
.reference = event->auxtrace.reference,
.size = event->auxtrace.size,
};
unsigned int idx = event->auxtrace.idx;
return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
buffer_ptr);
}
static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
struct perf_session *session,
off_t file_offset, size_t sz)
{
union perf_event *event;
int err;
char buf[PERF_SAMPLE_MAX_SIZE];
err = perf_session__peek_event(session, file_offset, buf,
PERF_SAMPLE_MAX_SIZE, &event, NULL);
if (err)
return err;
if (event->header.type == PERF_RECORD_AUXTRACE) {
if (event->header.size < sizeof(struct perf_record_auxtrace) ||
event->header.size != sz) {
err = -EINVAL;
goto out;
}
file_offset += event->header.size;
err = auxtrace_queues__add_event(queues, session, event,
file_offset, NULL);
}
out:
return err;
}
void auxtrace_queues__free(struct auxtrace_queues *queues)
{
unsigned int i;
for (i = 0; i < queues->nr_queues; i++) {
while (!list_empty(&queues->queue_array[i].head)) {
struct auxtrace_buffer *buffer;
buffer = list_entry(queues->queue_array[i].head.next,
struct auxtrace_buffer, list);
list_del_init(&buffer->list);
auxtrace_buffer__free(buffer);
}
}
zfree(&queues->queue_array);
queues->nr_queues = 0;
}
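/*
 * Min-heap insert: sift the hole at 'pos' up past any parent with a larger
 * ordinal, then store the (queue_nr, ordinal) entry in the final hole.
 */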
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
unsigned int pos, unsigned int queue_nr,
u64 ordinal)
{
unsigned int parent;
while (pos) {
parent = (pos - 1) >> 1;
if (heap_array[parent].ordinal <= ordinal)
break;
heap_array[pos] = heap_array[parent];
pos = parent;
}
heap_array[pos].queue_nr = queue_nr;
heap_array[pos].ordinal = ordinal;
}
int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
u64 ordinal)
{
struct auxtrace_heap_item *heap_array;
if (queue_nr >= heap->heap_sz) {
unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;
while (heap_sz <= queue_nr)
heap_sz <<= 1;
heap_array = realloc(heap->heap_array,
heap_sz * sizeof(struct auxtrace_heap_item));
if (!heap_array)
return -ENOMEM;
heap->heap_array = heap_array;
heap->heap_sz = heap_sz;
}
auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);
return 0;
}
void auxtrace_heap__free(struct auxtrace_heap *heap)
{
zfree(&heap->heap_array);
heap->heap_cnt = 0;
heap->heap_sz = 0;
}
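/*
 * Remove the root (smallest ordinal) of the min-heap: walk down promoting
 * the smaller child into the hole, then re-insert the last entry at the
 * final hole position via auxtrace_heapify().
 */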
void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
unsigned int pos, last, heap_cnt = heap->heap_cnt;
struct auxtrace_heap_item *heap_array;
if (!heap_cnt)
return;
heap->heap_cnt -= 1;
heap_array = heap->heap_array;
pos = 0;
while (1) {
unsigned int left, right;
left = (pos << 1) + 1;
if (left >= heap_cnt)
break;
right = left + 1;
if (right >= heap_cnt) {
heap_array[pos] = heap_array[left];
return;
}
if (heap_array[left].ordinal < heap_array[right].ordinal) {
heap_array[pos] = heap_array[left];
pos = left;
} else {
heap_array[pos] = heap_array[right];
pos = right;
}
}
last = heap_cnt - 1;
auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
heap_array[last].ordinal);
}
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
struct evlist *evlist)
{
if (itr)
return itr->info_priv_size(itr, evlist);
return 0;
}
static int auxtrace_not_supported(void)
{
pr_err("AUX area tracing is not supported on this architecture\n");
return -EINVAL;
}
int auxtrace_record__info_fill(struct auxtrace_record *itr,
struct perf_session *session,
struct perf_record_auxtrace_info *auxtrace_info,
size_t priv_size)
{
if (itr)
return itr->info_fill(itr, session, auxtrace_info, priv_size);
return auxtrace_not_supported();
}
void auxtrace_record__free(struct auxtrace_record *itr)
{
if (itr)
itr->free(itr);
}
int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
if (itr && itr->snapshot_start)
return itr->snapshot_start(itr);
return 0;
}
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
{
if (!on_exit && itr && itr->snapshot_finish)
return itr->snapshot_finish(itr);
return 0;
}
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
struct auxtrace_mmap *mm,
unsigned char *data, u64 *head, u64 *old)
{
if (itr && itr->find_snapshot)
return itr->find_snapshot(itr, idx, mm, data, head, old);
return 0;
}
int auxtrace_record__options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
{
if (itr) {
itr->evlist = evlist;
return itr->recording_options(itr, evlist, opts);
}
return 0;
}
u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
if (itr)
return itr->reference(itr);
return 0;
}
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
struct record_opts *opts, const char *str)
{
if (!str)
return 0;
/* PMU-agnostic options */
switch (*str) {
case 'e':
opts->auxtrace_snapshot_on_exit = true;
str++;
break;
default:
break;
}
if (itr && itr->parse_snapshot_options)
return itr->parse_snapshot_options(itr, opts, str);
pr_err("No AUX area tracing to snapshot\n");
return -EINVAL;
}
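/*
 * Re-enable the event for mmap index 'idx': per-CPU if the mmaps are
 * per-CPU, otherwise per-thread.
 */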
static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
if (per_cpu_mmaps) {
struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
int cpu_map_idx = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
if (cpu_map_idx == -1)
return -EINVAL;
return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
}
return perf_evsel__enable_thread(&evsel->core, idx);
}
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
struct evsel *evsel;
if (!itr->evlist || !itr->pmu)
return -EINVAL;
evlist__for_each_entry(itr->evlist, evsel) {
if (evsel->core.attr.type == itr->pmu->type) {
if (evsel->disabled)
return 0;
return evlist__enable_event_idx(itr->evlist, evsel, idx);
}
}
return -EINVAL;
}
/*
* Event record size is 16-bit which results in a maximum size of about 64KiB.
* Allow about 4KiB for the rest of the sample record, to give a maximum
* AUX area sample size of 60KiB.
*/
#define MAX_AUX_SAMPLE_SIZE (60 * 1024)
/* Arbitrary default size if no other default provided */
#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)
static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
struct record_opts *opts)
{
struct evsel *evsel;
bool has_aux_leader = false;
u32 sz;
evlist__for_each_entry(evlist, evsel) {
sz = evsel->core.attr.aux_sample_size;
if (evsel__is_group_leader(evsel)) {
has_aux_leader = evsel__is_aux_event(evsel);
if (sz) {
if (has_aux_leader)
pr_err("Cannot add AUX area sampling to an AUX area event\n");
else
pr_err("Cannot add AUX area sampling to a group leader\n");
return -EINVAL;
}
}
if (sz > MAX_AUX_SAMPLE_SIZE) {
pr_err("AUX area sample size %u too big, max. %d\n",
sz, MAX_AUX_SAMPLE_SIZE);
return -EINVAL;
}
if (sz) {
if (!has_aux_leader) {
pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
return -EINVAL;
}
evsel__set_sample_bit(evsel, AUX);
opts->auxtrace_sample_mode = true;
} else {
evsel__reset_sample_bit(evsel, AUX);
}
}
if (!opts->auxtrace_sample_mode) {
pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
return -EINVAL;
}
if (!perf_can_aux_sample()) {
pr_err("AUX area sampling is not supported by kernel\n");
return -EINVAL;
}
return 0;
}
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts, const char *str)
{
struct evsel_config_term *term;
struct evsel *aux_evsel;
bool has_aux_sample_size = false;
bool has_aux_leader = false;
struct evsel *evsel;
char *endptr;
unsigned long sz;
if (!str)
goto no_opt;
if (!itr) {
pr_err("No AUX area event to sample\n");
return -EINVAL;
}
sz = strtoul(str, &endptr, 0);
if (*endptr || sz > UINT_MAX) {
pr_err("Bad AUX area sampling option: '%s'\n", str);
return -EINVAL;
}
if (!sz)
sz = itr->default_aux_sample_size;
if (!sz)
sz = DEFAULT_AUX_SAMPLE_SIZE;
/* Set aux_sample_size based on --aux-sample option */
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_group_leader(evsel)) {
has_aux_leader = evsel__is_aux_event(evsel);
} else if (has_aux_leader) {
evsel->core.attr.aux_sample_size = sz;
}
}
no_opt:
aux_evsel = NULL;
/* Override with aux_sample_size from config term */
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_aux_event(evsel))
aux_evsel = evsel;
term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
if (term) {
has_aux_sample_size = true;
evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
/* If possible, group with the AUX event */
if (aux_evsel && evsel->core.attr.aux_sample_size)
evlist__regroup(evlist, aux_evsel, evsel);
}
}
if (!str && !has_aux_sample_size)
return 0;
if (!itr) {
pr_err("No AUX area event to sample\n");
return -EINVAL;
}
return auxtrace_validate_aux_sample_size(evlist, opts);
}
void auxtrace_regroup_aux_output(struct evlist *evlist)
{
struct evsel *evsel, *aux_evsel = NULL;
struct evsel_config_term *term;
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_aux_event(evsel))
aux_evsel = evsel;
term = evsel__get_config_term(evsel, AUX_OUTPUT);
/* If possible, group with the AUX event */
if (term && aux_evsel)
evlist__regroup(evlist, aux_evsel, evsel);
}
}
struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
*err = 0;
return NULL;
}
static int auxtrace_index__alloc(struct list_head *head)
{
struct auxtrace_index *auxtrace_index;
auxtrace_index = malloc(sizeof(struct auxtrace_index));
if (!auxtrace_index)
return -ENOMEM;
auxtrace_index->nr = 0;
INIT_LIST_HEAD(&auxtrace_index->list);
list_add_tail(&auxtrace_index->list, head);
return 0;
}
void auxtrace_index__free(struct list_head *head)
{
struct auxtrace_index *auxtrace_index, *n;
list_for_each_entry_safe(auxtrace_index, n, head, list) {
list_del_init(&auxtrace_index->list);
free(auxtrace_index);
}
}
static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
struct auxtrace_index *auxtrace_index;
int err;
if (list_empty(head)) {
err = auxtrace_index__alloc(head);
if (err)
return NULL;
}
auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);
if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
err = auxtrace_index__alloc(head);
if (err)
return NULL;
auxtrace_index = list_entry(head->prev, struct auxtrace_index,
list);
}
return auxtrace_index;
}
int auxtrace_index__auxtrace_event(struct list_head *head,
union perf_event *event, off_t file_offset)
{
struct auxtrace_index *auxtrace_index;
size_t nr;
auxtrace_index = auxtrace_index__last(head);
if (!auxtrace_index)
return -ENOMEM;
nr = auxtrace_index->nr;
auxtrace_index->entries[nr].file_offset = file_offset;
auxtrace_index->entries[nr].sz = event->header.size;
auxtrace_index->nr += 1;
return 0;
}
static int auxtrace_index__do_write(int fd,
struct auxtrace_index *auxtrace_index)
{
struct auxtrace_index_entry ent;
size_t i;
for (i = 0; i < auxtrace_index->nr; i++) {
ent.file_offset = auxtrace_index->entries[i].file_offset;
ent.sz = auxtrace_index->entries[i].sz;
if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
return -errno;
}
return 0;
}
int auxtrace_index__write(int fd, struct list_head *head)
{
struct auxtrace_index *auxtrace_index;
u64 total = 0;
int err;
list_for_each_entry(auxtrace_index, head, list)
total += auxtrace_index->nr;
if (writen(fd, &total, sizeof(total)) != sizeof(total))
return -errno;
list_for_each_entry(auxtrace_index, head, list) {
err = auxtrace_index__do_write(fd, auxtrace_index);
if (err)
return err;
}
return 0;
}
static int auxtrace_index__process_entry(int fd, struct list_head *head,
bool needs_swap)
{
struct auxtrace_index *auxtrace_index;
struct auxtrace_index_entry ent;
size_t nr;
if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
return -1;
auxtrace_index = auxtrace_index__last(head);
if (!auxtrace_index)
return -1;
nr = auxtrace_index->nr;
if (needs_swap) {
auxtrace_index->entries[nr].file_offset =
bswap_64(ent.file_offset);
auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
} else {
auxtrace_index->entries[nr].file_offset = ent.file_offset;
auxtrace_index->entries[nr].sz = ent.sz;
}
auxtrace_index->nr = nr + 1;
return 0;
}
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
bool needs_swap)
{
struct list_head *head = &session->auxtrace_index;
u64 nr;
if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
return -1;
if (needs_swap)
nr = bswap_64(nr);
if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
return -1;
while (nr--) {
int err;
err = auxtrace_index__process_entry(fd, head, needs_swap);
if (err)
return -1;
}
return 0;
}
static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
struct perf_session *session,
struct auxtrace_index_entry *ent)
{
return auxtrace_queues__add_indexed_event(queues, session,
ent->file_offset, ent->sz);
}
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
struct perf_session *session)
{
struct auxtrace_index *auxtrace_index;
struct auxtrace_index_entry *ent;
size_t i;
int err;
if (auxtrace__dont_decode(session))
return 0;
list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
for (i = 0; i < auxtrace_index->nr; i++) {
ent = &auxtrace_index->entries[i];
err = auxtrace_queues__process_index_entry(queues,
session,
ent);
if (err)
return err;
}
}
return 0;
}
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
struct auxtrace_buffer *buffer)
{
if (buffer) {
if (list_is_last(&buffer->list, &queue->head))
return NULL;
return list_entry(buffer->list.next, struct auxtrace_buffer,
list);
} else {
if (list_empty(&queue->head))
return NULL;
return list_entry(queue->head.next, struct auxtrace_buffer,
list);
}
}
struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
struct perf_sample *sample,
struct perf_session *session)
{
struct perf_sample_id *sid;
unsigned int idx;
u64 id;
id = sample->id;
if (!id)
return NULL;
sid = evlist__id2sid(session->evlist, id);
if (!sid)
return NULL;
idx = sid->idx;
if (idx >= queues->nr_queues)
return NULL;
return &queues->queue_array[idx];
}
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
struct perf_session *session,
struct perf_sample *sample, u64 data_offset,
u64 reference)
{
struct auxtrace_buffer buffer = {
.pid = -1,
.data_offset = data_offset,
.reference = reference,
.size = sample->aux_sample.size,
};
struct perf_sample_id *sid;
u64 id = sample->id;
unsigned int idx;
if (!id)
return -EINVAL;
sid = evlist__id2sid(session->evlist, id);
if (!sid)
return -ENOENT;
idx = sid->idx;
buffer.tid = sid->tid;
buffer.cpu = sid->cpu;
return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
}
struct queue_data {
bool samples;
bool events;
};
static int auxtrace_queue_data_cb(struct perf_session *session,
union perf_event *event, u64 offset,
void *data)
{
struct queue_data *qd = data;
struct perf_sample sample;
int err;
if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
if (event->header.size < sizeof(struct perf_record_auxtrace))
return -EINVAL;
offset += event->header.size;
return session->auxtrace->queue_data(session, NULL, event,
offset);
}
if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
return 0;
err = evlist__parse_sample(session->evlist, event, &sample);
if (err)
return err;
if (!sample.aux_sample.size)
return 0;
offset += sample.aux_sample.data - (void *)event;
return session->auxtrace->queue_data(session, &sample, NULL, offset);
}
int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
{
struct queue_data qd = {
.samples = samples,
.events = events,
};
if (auxtrace__dont_decode(session))
return 0;
if (perf_data__is_pipe(session->data))
return 0;
if (!session->auxtrace || !session->auxtrace->queue_data)
return -EINVAL;
return perf_session__peek_events(session, session->header.data_offset,
session->header.data_size,
auxtrace_queue_data_cb, &qd);
}
void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
{
int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
size_t adj = buffer->data_offset & (page_size - 1);
size_t size = buffer->size + adj;
off_t file_offset = buffer->data_offset - adj;
void *addr;
if (buffer->data)
return buffer->data;
addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
if (addr == MAP_FAILED)
return NULL;
buffer->mmap_addr = addr;
buffer->mmap_size = size;
buffer->data = addr + adj;
return buffer->data;
}
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
if (!buffer->data || !buffer->mmap_addr)
return;
munmap(buffer->mmap_addr, buffer->mmap_size);
buffer->mmap_addr = NULL;
buffer->mmap_size = 0;
buffer->data = NULL;
buffer->use_data = NULL;
}
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
auxtrace_buffer__put_data(buffer);
if (buffer->data_needs_freeing) {
buffer->data_needs_freeing = false;
zfree(&buffer->data);
buffer->use_data = NULL;
buffer->size = 0;
}
}
void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
auxtrace_buffer__drop_data(buffer);
free(buffer);
}
void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
const char *msg, u64 timestamp,
pid_t machine_pid, int vcpu)
{
size_t size;
memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));
auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
auxtrace_error->type = type;
auxtrace_error->code = code;
auxtrace_error->cpu = cpu;
auxtrace_error->pid = pid;
auxtrace_error->tid = tid;
auxtrace_error->fmt = 1;
auxtrace_error->ip = ip;
auxtrace_error->time = timestamp;
strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
if (machine_pid) {
auxtrace_error->fmt = 2;
auxtrace_error->machine_pid = machine_pid;
auxtrace_error->vcpu = vcpu;
size = sizeof(*auxtrace_error);
} else {
size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
strlen(auxtrace_error->msg) + 1;
}
auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}
void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
const char *msg, u64 timestamp)
{
auxtrace_synth_guest_error(auxtrace_error, type, code, cpu, pid, tid,
ip, msg, timestamp, 0, -1);
}
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
struct perf_tool *tool,
struct perf_session *session,
perf_event__handler_t process)
{
union perf_event *ev;
size_t priv_size;
int err;
pr_debug2("Synthesizing auxtrace information\n");
priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
if (!ev)
return -ENOMEM;
ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
priv_size;
err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
priv_size);
if (err)
goto out_free;
err = process(tool, ev, NULL, NULL);
out_free:
free(ev);
return err;
}
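/*
 * Remove 'leader' from its group: promote the first remaining member to
 * group leader and transfer the group name and member count to it.
 */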
static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
{
struct evsel *new_leader = NULL;
struct evsel *evsel;
/* Find new leader for the group */
evlist__for_each_entry(evlist, evsel) {
if (!evsel__has_leader(evsel, leader) || evsel == leader)
continue;
if (!new_leader)
new_leader = evsel;
evsel__set_leader(evsel, new_leader);
}
/* Update group information */
if (new_leader) {
zfree(&new_leader->group_name);
new_leader->group_name = leader->group_name;
leader->group_name = NULL;
new_leader->core.nr_members = leader->core.nr_members - 1;
leader->core.nr_members = 1;
}
}
static void unleader_auxtrace(struct perf_session *session)
{
struct evsel *evsel;
evlist__for_each_entry(session->evlist, evsel) {
if (auxtrace__evsel_is_auxtrace(session, evsel) &&
evsel__is_group_leader(evsel)) {
unleader_evsel(session->evlist, evsel);
}
}
}
int perf_event__process_auxtrace_info(struct perf_session *session,
union perf_event *event)
{
enum auxtrace_type type = event->auxtrace_info.type;
int err;
if (dump_trace)
fprintf(stdout, " type: %u\n", type);
switch (type) {
case PERF_AUXTRACE_INTEL_PT:
err = intel_pt_process_auxtrace_info(event, session);
break;
case PERF_AUXTRACE_INTEL_BTS:
err = intel_bts_process_auxtrace_info(event, session);
break;
case PERF_AUXTRACE_ARM_SPE:
err = arm_spe_process_auxtrace_info(event, session);
break;
case PERF_AUXTRACE_CS_ETM:
err = cs_etm__process_auxtrace_info(event, session);
break;
case PERF_AUXTRACE_S390_CPUMSF:
err = s390_cpumsf_process_auxtrace_info(event, session);
break;
case PERF_AUXTRACE_HISI_PTT:
err = hisi_ptt_process_auxtrace_info(event, session);
break;
case PERF_AUXTRACE_UNKNOWN:
default:
return -EINVAL;
}
if (err)
return err;
unleader_auxtrace(session);
return 0;
}
s64 perf_event__process_auxtrace(struct perf_session *session,
union perf_event *event)
{
s64 err;
if (dump_trace)
fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
event->auxtrace.size, event->auxtrace.offset,
event->auxtrace.reference, event->auxtrace.idx,
event->auxtrace.tid, event->auxtrace.cpu);
if (auxtrace__dont_decode(session))
return event->auxtrace.size;
if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
return -EINVAL;
err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
if (err < 0)
return err;
return event->auxtrace.size;
}
#define PERF_ITRACE_DEFAULT_PERIOD_TYPE PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD 100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ 16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ 1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ 64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ 1024
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
bool no_sample)
{
synth_opts->branches = true;
synth_opts->transactions = true;
synth_opts->ptwrites = true;
synth_opts->pwr_events = true;
synth_opts->other_events = true;
synth_opts->intr_events = true;
synth_opts->errors = true;
synth_opts->flc = true;
synth_opts->llc = true;
synth_opts->tlb = true;
synth_opts->mem = true;
synth_opts->remote_access = true;
if (no_sample) {
synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
synth_opts->period = 1;
synth_opts->calls = true;
} else {
synth_opts->instructions = true;
synth_opts->cycles = true;
synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
}
synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
synth_opts->initial_skip = 0;
}
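/* Map one lowercase flag character to a bit: 'a' -> bit 0 ... 'z' -> bit 25 */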
static int get_flag(const char **ptr, unsigned int *flags)
{
while (1) {
char c = **ptr;
if (c >= 'a' && c <= 'z') {
*flags |= 1 << (c - 'a');
++*ptr;
return 0;
} else if (c == ' ') {
++*ptr;
continue;
} else {
return -1;
}
}
}
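/*
 * Parse a sequence of "+<flag>" / "-<flag>" characters, accumulating bits
 * into plus_flags and minus_flags; stop at the first other character.
 */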
static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
{
while (1) {
switch (**ptr) {
case '+':
++*ptr;
if (get_flag(ptr, plus_flags))
return -1;
break;
case '-':
++*ptr;
if (get_flag(ptr, minus_flags))
return -1;
break;
case ' ':
++*ptr;
break;
default:
return 0;
}
}
}
#define ITRACE_DFLT_LOG_ON_ERROR_SZ 16384
static unsigned int itrace_log_on_error_size(void)
{
unsigned int sz = 0;
perf_config_scan("itrace.debug-log-buffer-size", "%u", &sz);
return sz ?: ITRACE_DFLT_LOG_ON_ERROR_SZ;
}
/*
* Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here, which were introduced after this cset,
 * when support for these options was added to 'perf script'.
*/
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
const char *str, int unset)
{
const char *p;
char *endptr;
bool period_type_set = false;
bool period_set = false;
synth_opts->set = true;
if (unset) {
synth_opts->dont_decode = true;
return 0;
}
if (!str) {
itrace_synth_opts__set_default(synth_opts,
synth_opts->default_no_sample);
return 0;
}
for (p = str; *p;) {
switch (*p++) {
case 'i':
case 'y':
if (p[-1] == 'y')
synth_opts->cycles = true;
else
synth_opts->instructions = true;
while (*p == ' ' || *p == ',')
p += 1;
if (isdigit(*p)) {
synth_opts->period = strtoull(p, &endptr, 10);
period_set = true;
p = endptr;
while (*p == ' ' || *p == ',')
p += 1;
switch (*p++) {
case 'i':
synth_opts->period_type =
PERF_ITRACE_PERIOD_INSTRUCTIONS;
period_type_set = true;
break;
case 't':
synth_opts->period_type =
PERF_ITRACE_PERIOD_TICKS;
period_type_set = true;
break;
case 'm':
synth_opts->period *= 1000;
/* Fall through */
case 'u':
synth_opts->period *= 1000;
/* Fall through */
case 'n':
if (*p++ != 's')
goto out_err;
synth_opts->period_type =
PERF_ITRACE_PERIOD_NANOSECS;
period_type_set = true;
break;
case '\0':
goto out;
default:
goto out_err;
}
}
break;
case 'b':
synth_opts->branches = true;
break;
case 'x':
synth_opts->transactions = true;
break;
case 'w':
synth_opts->ptwrites = true;
break;
case 'p':
synth_opts->pwr_events = true;
break;
case 'o':
synth_opts->other_events = true;
break;
case 'I':
synth_opts->intr_events = true;
break;
case 'e':
synth_opts->errors = true;
if (get_flags(&p, &synth_opts->error_plus_flags,
&synth_opts->error_minus_flags))
goto out_err;
break;
case 'd':
synth_opts->log = true;
if (get_flags(&p, &synth_opts->log_plus_flags,
&synth_opts->log_minus_flags))
goto out_err;
if (synth_opts->log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR)
synth_opts->log_on_error_size = itrace_log_on_error_size();
break;
case 'c':
synth_opts->branches = true;
synth_opts->calls = true;
break;
case 'r':
synth_opts->branches = true;
synth_opts->returns = true;
break;
case 'G':
case 'g':
if (p[-1] == 'G')
synth_opts->add_callchain = true;
else
synth_opts->callchain = true;
synth_opts->callchain_sz =
PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
while (*p == ' ' || *p == ',')
p += 1;
if (isdigit(*p)) {
unsigned int val;
val = strtoul(p, &endptr, 10);
p = endptr;
if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
goto out_err;
synth_opts->callchain_sz = val;
}
break;
case 'L':
case 'l':
if (p[-1] == 'L')
synth_opts->add_last_branch = true;
else
synth_opts->last_branch = true;
synth_opts->last_branch_sz =
PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
while (*p == ' ' || *p == ',')
p += 1;
if (isdigit(*p)) {
unsigned int val;
val = strtoul(p, &endptr, 10);
p = endptr;
if (!val ||
val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
goto out_err;
synth_opts->last_branch_sz = val;
}
break;
case 's':
synth_opts->initial_skip = strtoul(p, &endptr, 10);
if (p == endptr)
goto out_err;
p = endptr;
break;
case 'f':
synth_opts->flc = true;
break;
case 'm':
synth_opts->llc = true;
break;
case 't':
synth_opts->tlb = true;
break;
case 'a':
synth_opts->remote_access = true;
break;
case 'M':
synth_opts->mem = true;
break;
case 'q':
synth_opts->quick += 1;
break;
case 'A':
synth_opts->approx_ipc = true;
break;
case 'Z':
synth_opts->timeless_decoding = true;
break;
case ' ':
case ',':
break;
default:
goto out_err;
}
}
out:
if (synth_opts->instructions || synth_opts->cycles) {
if (!period_type_set)
synth_opts->period_type =
PERF_ITRACE_DEFAULT_PERIOD_TYPE;
if (!period_set)
synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
}
return 0;
out_err:
pr_err("Bad Instruction Tracing options '%s'\n", str);
return -EINVAL;
}
int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
{
return itrace_do_parse_synth_opts(opt->value, str, unset);
}
static const char * const auxtrace_error_type_name[] = {
[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};
static const char *auxtrace_error_name(int type)
{
const char *error_type_name = NULL;
if (type < PERF_AUXTRACE_ERROR_MAX)
error_type_name = auxtrace_error_type_name[type];
if (!error_type_name)
error_type_name = "unknown AUX";
return error_type_name;
}
size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
struct perf_record_auxtrace_error *e = &event->auxtrace_error;
unsigned long long nsecs = e->time;
const char *msg = e->msg;
int ret;
ret = fprintf(fp, " %s error type %u",
auxtrace_error_name(e->type), e->type);
if (e->fmt && nsecs) {
unsigned long secs = nsecs / NSEC_PER_SEC;
nsecs -= secs * NSEC_PER_SEC;
ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
} else {
ret += fprintf(fp, " time 0");
}
if (!e->fmt)
msg = (const char *)&e->time;
if (e->fmt >= 2 && e->machine_pid)
ret += fprintf(fp, " machine_pid %d vcpu %d", e->machine_pid, e->vcpu);
ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
e->cpu, e->pid, e->tid, e->ip, e->code, msg);
return ret;
}
void perf_session__auxtrace_error_inc(struct perf_session *session,
union perf_event *event)
{
struct perf_record_auxtrace_error *e = &event->auxtrace_error;
if (e->type < PERF_AUXTRACE_ERROR_MAX)
session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}
void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
int i;
for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
if (!stats->nr_auxtrace_errors[i])
continue;
ui__warning("%u %s errors\n",
stats->nr_auxtrace_errors[i],
auxtrace_error_name(i));
}
}
int perf_event__process_auxtrace_error(struct perf_session *session,
union perf_event *event)
{
if (auxtrace__dont_decode(session))
return 0;
perf_event__fprintf_auxtrace_error(event, stdout);
return 0;
}
/*
 * In compat mode the kernel runs 64-bit while the perf tool runs 32-bit,
 * so the perf tool cannot access a 64-bit value atomically. On some
 * architectures a 64-bit load or store is split into two instructions,
 * one accessing the low 32 bits and one accessing the high 32 bits;
 * between those two user-space accesses the kernel has a chance to update
 * the 64-bit value, which leads to unexpected (torn) load values, as in
 * the sequence below on multiple CPUs:
*
* kernel (64-bit) user (32-bit)
*
* if (LOAD ->aux_tail) { --, LOAD ->aux_head_lo
* STORE $aux_data | ,--->
* FLUSH $aux_data | | LOAD ->aux_head_hi
* STORE ->aux_head --|-------` smp_rmb()
* } | LOAD $data
* | smp_mb()
* | STORE ->aux_tail_lo
* `----------->
* STORE ->aux_tail_hi
*
 * For this reason, the perf tool cannot work correctly once the AUX head
 * or tail grows bigger than 4GB (i.e. needs more than 32 bits); and we
 * cannot simply limit the AUX ring buffer to less than 4GB, because the
 * pointers increase monotonically whatever the buffer size is, so at some
 * point the head and tail exceed 4GB and carry into the high 32 bits.
 *
 * To mitigate the issue and improve the user experience, allow the perf
 * tool to work under certain conditions and bail out with an error if an
 * overflow is detected that cannot be handled.
 *
 * To read the AUX head, read the value three times and compare the high
 * 4 bytes of the first and last reads. If the kernel did not change the
 * high 4 bytes during the read sequence, the second value is safe to use.
 *
 * When compat_auxtrace_mmap__write_tail() detects a carry into the high
 * 32 bits, the write would need two store operations in user space and
 * 64-bit atomicity cannot be guaranteed, so return '-1' in this case to
 * tell the caller an overflow error has happened.
*/
u64 __weak compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->userpg;
u64 first, second, last;
u64 mask = (u64)(UINT32_MAX) << 32;
do {
first = READ_ONCE(pc->aux_head);
/* Ensure all reads are done after we read the head */
smp_rmb();
second = READ_ONCE(pc->aux_head);
/* Ensure all reads are done after we read the head */
smp_rmb();
last = READ_ONCE(pc->aux_head);
} while ((first & mask) != (last & mask));
return second;
}
int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
struct perf_event_mmap_page *pc = mm->userpg;
u64 mask = (u64)(UINT32_MAX) << 32;
if (tail & mask)
return -1;
/* Ensure all reads are done before we write the tail out */
smp_mb();
WRITE_ONCE(pc->aux_tail, tail);
return 0;
}
static int __auxtrace_mmap__read(struct mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
bool snapshot, size_t snapshot_size)
{
struct auxtrace_mmap *mm = &map->auxtrace_mmap;
u64 head, old = mm->prev, offset, ref;
unsigned char *data = mm->base;
size_t size, head_off, old_off, len1, len2, padding;
union perf_event ev;
void *data1, *data2;
int kernel_is_64_bit = perf_env__kernel_is_64_bit(evsel__env(NULL));
head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);
if (snapshot &&
auxtrace_record__find_snapshot(itr, mm->idx, mm, data, &head, &old))
return -1;
if (old == head)
return 0;
pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
mm->idx, old, head, head - old);
if (mm->mask) {
head_off = head & mm->mask;
old_off = old & mm->mask;
} else {
head_off = head % mm->len;
old_off = old % mm->len;
}
if (head_off > old_off)
size = head_off - old_off;
else
size = mm->len - (old_off - head_off);
if (snapshot && size > snapshot_size)
size = snapshot_size;
ref = auxtrace_record__reference(itr);
if (head > old || size <= head || mm->mask) {
offset = head - size;
} else {
/*
* When the buffer size is not a power of 2, 'head' wraps at the
* highest multiple of the buffer size, so we have to subtract
* the remainder here.
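		 * E.g. for a 24KiB buffer the wrap point is the largest
		 * multiple of 24KiB representable in 64 bits, so the
		 * remainder is (2^64 mod 24KiB).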
*/
u64 rem = (0ULL - mm->len) % mm->len;
offset = head - size - rem;
}
if (size > head_off) {
len1 = size - head_off;
data1 = &data[mm->len - len1];
len2 = head_off;
data2 = &data[0];
} else {
len1 = size;
data1 = &data[head_off - len1];
len2 = 0;
data2 = NULL;
}
if (itr->alignment) {
unsigned int unwanted = len1 % itr->alignment;
len1 -= unwanted;
size -= unwanted;
}
/* padding must be written by fn() e.g. record__process_auxtrace() */
padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
if (padding)
padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
memset(&ev, 0, sizeof(ev));
ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
ev.auxtrace.header.size = sizeof(ev.auxtrace);
ev.auxtrace.size = size + padding;
ev.auxtrace.offset = offset;
ev.auxtrace.reference = ref;
ev.auxtrace.idx = mm->idx;
ev.auxtrace.tid = mm->tid;
ev.auxtrace.cpu = mm->cpu;
if (fn(tool, map, &ev, data1, len1, data2, len2))
return -1;
mm->prev = head;
if (!snapshot) {
int err;
err = auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
if (err < 0)
return err;
if (itr->read_finish) {
err = itr->read_finish(itr, mm->idx);
if (err < 0)
return err;
}
}
return 1;
}
int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn)
{
return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}
int auxtrace_mmap__read_snapshot(struct mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size)
{
return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
}
/**
* struct auxtrace_cache - hash table to implement a cache
* @hashtable: the hashtable
* @sz: hashtable size (number of hlists)
* @entry_size: size of an entry
* @limit: limit the number of entries to this maximum, when reached the cache
* is dropped and caching begins again with an empty cache
* @cnt: current number of entries
* @bits: hashtable size (@sz = 2^@bits)
*/
struct auxtrace_cache {
struct hlist_head *hashtable;
size_t sz;
size_t entry_size;
size_t limit;
size_t cnt;
unsigned int bits;
};
struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
unsigned int limit_percent)
{
struct auxtrace_cache *c;
struct hlist_head *ht;
size_t sz, i;
c = zalloc(sizeof(struct auxtrace_cache));
if (!c)
return NULL;
sz = 1UL << bits;
ht = calloc(sz, sizeof(struct hlist_head));
if (!ht)
goto out_free;
for (i = 0; i < sz; i++)
INIT_HLIST_HEAD(&ht[i]);
c->hashtable = ht;
c->sz = sz;
c->entry_size = entry_size;
c->limit = (c->sz * limit_percent) / 100;
c->bits = bits;
return c;
out_free:
free(c);
return NULL;
}
static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
struct auxtrace_cache_entry *entry;
struct hlist_node *tmp;
size_t i;
if (!c)
return;
for (i = 0; i < c->sz; i++) {
hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
hlist_del(&entry->hash);
auxtrace_cache__free_entry(c, entry);
}
}
c->cnt = 0;
}
void auxtrace_cache__free(struct auxtrace_cache *c)
{
if (!c)
return;
auxtrace_cache__drop(c);
zfree(&c->hashtable);
free(c);
}
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
return malloc(c->entry_size);
}
void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
void *entry)
{
free(entry);
}
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
struct auxtrace_cache_entry *entry)
{
if (c->limit && ++c->cnt > c->limit)
auxtrace_cache__drop(c);
entry->key = key;
hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
return 0;
}
static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
u32 key)
{
struct auxtrace_cache_entry *entry;
struct hlist_head *hlist;
struct hlist_node *n;
if (!c)
return NULL;
hlist = &c->hashtable[hash_32(key, c->bits)];
hlist_for_each_entry_safe(entry, n, hlist, hash) {
if (entry->key == key) {
hlist_del(&entry->hash);
return entry;
}
}
return NULL;
}
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
{
struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);
auxtrace_cache__free_entry(c, entry);
}
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
struct auxtrace_cache_entry *entry;
struct hlist_head *hlist;
if (!c)
return NULL;
hlist = &c->hashtable[hash_32(key, c->bits)];
hlist_for_each_entry(entry, hlist, hash) {
if (entry->key == key)
return entry;
}
return NULL;
}
static void addr_filter__free_str(struct addr_filter *filt)
{
zfree(&filt->str);
filt->action = NULL;
filt->sym_from = NULL;
filt->sym_to = NULL;
filt->filename = NULL;
}
static struct addr_filter *addr_filter__new(void)
{
struct addr_filter *filt = zalloc(sizeof(*filt));
if (filt)
INIT_LIST_HEAD(&filt->list);
return filt;
}
static void addr_filter__free(struct addr_filter *filt)
{
if (filt)
addr_filter__free_str(filt);
free(filt);
}
static void addr_filters__add(struct addr_filters *filts,
struct addr_filter *filt)
{
list_add_tail(&filt->list, &filts->head);
filts->cnt += 1;
}
static void addr_filters__del(struct addr_filters *filts,
struct addr_filter *filt)
{
list_del_init(&filt->list);
filts->cnt -= 1;
}
void addr_filters__init(struct addr_filters *filts)
{
INIT_LIST_HEAD(&filts->head);
filts->cnt = 0;
}
void addr_filters__exit(struct addr_filters *filts)
{
struct addr_filter *filt, *n;
list_for_each_entry_safe(filt, n, &filts->head, list) {
addr_filters__del(filts, filt);
addr_filter__free(filt);
}
}
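/*
 * Parse either a number into *num, or a string token into *str,
 * NUL-terminating the token in place at the first str_delim character and
 * advancing *inp past whatever was consumed.
 */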
static int parse_num_or_str(char **inp, u64 *num, const char **str,
const char *str_delim)
{
*inp += strspn(*inp, " ");
if (isdigit(**inp)) {
char *endptr;
if (!num)
return -EINVAL;
errno = 0;
*num = strtoull(*inp, &endptr, 0);
if (errno)
return -errno;
if (endptr == *inp)
return -EINVAL;
*inp = endptr;
} else {
size_t n;
if (!str)
return -EINVAL;
*inp += strspn(*inp, " ");
*str = *inp;
n = strcspn(*inp, str_delim);
if (!n)
return -EINVAL;
*inp += n;
if (**inp) {
**inp = '\0';
*inp += 1;
}
}
return 0;
}
static int parse_action(struct addr_filter *filt)
{
if (!strcmp(filt->action, "filter")) {
filt->start = true;
filt->range = true;
} else if (!strcmp(filt->action, "start")) {
filt->start = true;
} else if (!strcmp(filt->action, "stop")) {
filt->start = false;
} else if (!strcmp(filt->action, "tracestop")) {
filt->start = false;
filt->range = true;
filt->action += 5; /* Change 'tracestop' to 'stop' */
} else {
return -EINVAL;
}
return 0;
}
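/*
 * Parse an optional "#<n>" suffix selecting the n'th symbol with a given
 * name; "#g" or "#G" (index 0) selects the global symbol instead.
 */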
static int parse_sym_idx(char **inp, int *idx)
{
*idx = -1;
*inp += strspn(*inp, " ");
if (**inp != '#')
return 0;
*inp += 1;
if (**inp == 'g' || **inp == 'G') {
*inp += 1;
*idx = 0;
} else {
unsigned long num;
char *endptr;
errno = 0;
num = strtoul(*inp, &endptr, 0);
if (errno)
return -errno;
if (endptr == *inp || num > INT_MAX)
return -EINVAL;
*inp = endptr;
*idx = num;
}
return 0;
}
static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
int err = parse_num_or_str(inp, num, str, " ");
if (!err && *str)
err = parse_sym_idx(inp, idx);
return err;
}
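/*
 * Parse one filter of the form:
 *   <action> <start symbol or address> [/ <end symbol or size>] [@<file name>]
 * advancing *filter_inp past the filter and any trailing separators.
 */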
static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
char *fstr;
int err;
filt->str = fstr = strdup(*filter_inp);
if (!fstr)
return -ENOMEM;
err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
if (err)
goto out_err;
err = parse_action(filt);
if (err)
goto out_err;
err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
&filt->sym_from_idx);
if (err)
goto out_err;
fstr += strspn(fstr, " ");
if (*fstr == '/') {
fstr += 1;
err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
&filt->sym_to_idx);
if (err)
goto out_err;
filt->range = true;
}
fstr += strspn(fstr, " ");
if (*fstr == '@') {
fstr += 1;
err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
if (err)
goto out_err;
}
fstr += strspn(fstr, " ,");
*filter_inp += fstr - filt->str;
return 0;
out_err:
addr_filter__free_str(filt);
return err;
}
int addr_filters__parse_bare_filter(struct addr_filters *filts,
const char *filter)
{
struct addr_filter *filt;
const char *fstr = filter;
int err;
while (*fstr) {
		filt = addr_filter__new();
		if (!filt) {
			addr_filters__exit(filts);
			return -ENOMEM;
		}
		err = parse_one_filter(filt, &fstr);
if (err) {
addr_filter__free(filt);
addr_filters__exit(filts);
return err;
}
addr_filters__add(filts, filt);
}
return 0;
}
struct sym_args {
const char *name;
u64 start;
u64 size;
int idx;
int cnt;
bool started;
bool global;
bool selected;
bool duplicate;
bool near;
};
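/*
 * Match a kallsyms name against 'name', also accepting a trailing '\t' in
 * kname (kallsyms entries may carry tab-separated annotations, e.g. the
 * module name).
 */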
static bool kern_sym_name_match(const char *kname, const char *name)
{
size_t n = strlen(name);
return !strcmp(kname, name) ||
(!strncmp(kname, name, n) && kname[n] == '\t');
}
static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
/* A function with the same name, and global or the n'th found or any */
return kallsyms__is_function(type) &&
kern_sym_name_match(name, args->name) &&
((args->global && isupper(type)) ||
(args->selected && ++(args->cnt) == args->idx) ||
(!args->global && !args->selected));
}
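/*
 * kallsyms__parse() callback: remember the start of the first matching
 * symbol, derive its size from the start of the following symbol, and
 * flag any further match as a duplicate.
 */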
static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
struct sym_args *args = arg;
if (args->started) {
if (!args->size)
args->size = start - args->start;
if (args->selected) {
if (args->size)
return 1;
} else if (kern_sym_match(args, name, type)) {
args->duplicate = true;
return 1;
}
} else if (kern_sym_match(args, name, type)) {
args->started = true;
args->start = start;
}
return 0;
}
static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
struct sym_args *args = arg;
if (kern_sym_match(args, name, type)) {
pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
++args->cnt, start, type, name);
args->near = true;
} else if (args->near) {
args->near = false;
pr_err("\t\twhich is near\t\t%s\n", name);
}
return 0;
}
static int sym_not_found_error(const char *sym_name, int idx)
{
if (idx > 0) {
pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
idx, sym_name);
} else if (!idx) {
pr_err("Global symbol '%s' not found.\n", sym_name);
} else {
pr_err("Symbol '%s' not found.\n", sym_name);
}
pr_err("Note that symbols must be functions.\n");
return -EINVAL;
}
static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
struct sym_args args = {
.name = sym_name,
.idx = idx,
.global = !idx,
.selected = idx > 0,
};
int err;
*start = 0;
*size = 0;
err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
if (err < 0) {
pr_err("Failed to parse /proc/kallsyms\n");
return err;
}
if (args.duplicate) {
pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
args.cnt = 0;
kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
sym_name);
pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
return -EINVAL;
}
if (!args.started) {
pr_err("Kernel symbol lookup: ");
return sym_not_found_error(sym_name, idx);
}
*start = args.start;
*size = args.size;
return 0;
}
static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
char type, u64 start)
{
struct sym_args *args = arg;
u64 size;
if (!kallsyms__is_function(type))
return 0;
if (!args->started) {
args->started = true;
args->start = start;
}
/* Don't know exactly where the kernel ends, so we add a page */
size = round_up(start, page_size) + page_size - args->start;
if (size > args->size)
args->size = size;
return 0;
}
static int addr_filter__entire_kernel(struct addr_filter *filt)
{
struct sym_args args = { .started = false };
int err;
err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
if (err < 0 || !args.started) {
pr_err("Failed to parse /proc/kallsyms\n");
return err;
}
filt->addr = args.start;
filt->size = args.size;
return 0;
}
static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
if (start + size >= filt->addr)
return 0;
if (filt->sym_from) {
pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
filt->sym_to, start, filt->sym_from, filt->addr);
} else {
pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64")\n",
filt->sym_to, start, filt->addr);
}
return -EINVAL;
}
static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
bool no_size = false;
u64 start, size;
int err;
if (symbol_conf.kptr_restrict) {
pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
return -EINVAL;
}
if (filt->sym_from && !strcmp(filt->sym_from, "*"))
return addr_filter__entire_kernel(filt);
if (filt->sym_from) {
err = find_kern_sym(filt->sym_from, &start, &size,
filt->sym_from_idx);
if (err)
return err;
filt->addr = start;
if (filt->range && !filt->size && !filt->sym_to) {
filt->size = size;
no_size = !size;
}
}
if (filt->sym_to) {
err = find_kern_sym(filt->sym_to, &start, &size,
filt->sym_to_idx);
if (err)
return err;
err = check_end_after_start(filt, start, size);
if (err)
return err;
filt->size = start + size - filt->addr;
no_size = !size;
}
/* The very last symbol in kallsyms does not imply a particular size */
if (no_size) {
pr_err("Cannot determine size of symbol '%s'\n",
filt->sym_to ? filt->sym_to : filt->sym_from);
return -EINVAL;
}
return 0;
}
static struct dso *load_dso(const char *name)
{
struct map *map;
struct dso *dso;
map = dso__new_map(name);
if (!map)
return NULL;
if (map__load(map) < 0)
pr_err("File '%s' not found or has no symbols.\n", name);
dso = dso__get(map__dso(map));
map__put(map);
return dso;
}
static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
int idx)
{
/* Same name, and global or the n'th found or any */
return !arch__compare_symbol_names(name, sym->name) &&
((!idx && sym->binding == STB_GLOBAL) ||
(idx > 0 && ++*cnt == idx) ||
idx < 0);
}
static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
struct symbol *sym;
bool near = false;
int cnt = 0;
pr_err("Multiple symbols with name '%s'\n", sym_name);
sym = dso__first_symbol(dso);
while (sym) {
if (dso_sym_match(sym, sym_name, &cnt, -1)) {
pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
++cnt, sym->start,
sym->binding == STB_GLOBAL ? 'g' :
sym->binding == STB_LOCAL ? 'l' : 'w',
sym->name);
near = true;
} else if (near) {
near = false;
pr_err("\t\twhich is near\t\t%s\n", sym->name);
}
sym = dso__next_symbol(sym);
}
pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
sym_name);
pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}
static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
u64 *size, int idx)
{
struct symbol *sym;
int cnt = 0;
*start = 0;
*size = 0;
sym = dso__first_symbol(dso);
while (sym) {
if (*start) {
if (!*size)
*size = sym->start - *start;
if (idx > 0) {
if (*size)
return 0;
} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
print_duplicate_syms(dso, sym_name);
return -EINVAL;
}
} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
*start = sym->start;
*size = sym->end - sym->start;
}
sym = dso__next_symbol(sym);
}
if (!*start)
return sym_not_found_error(sym_name, idx);
return 0;
}
static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
if (dso__data_file_size(dso, NULL)) {
pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
filt->filename);
return -EINVAL;
}
filt->addr = 0;
filt->size = dso->data.file_size;
return 0;
}
static int addr_filter__resolve_syms(struct addr_filter *filt)
{
u64 start, size;
struct dso *dso;
int err = 0;
if (!filt->sym_from && !filt->sym_to)
return 0;
if (!filt->filename)
return addr_filter__resolve_kernel_syms(filt);
dso = load_dso(filt->filename);
if (!dso) {
pr_err("Failed to load symbols from: %s\n", filt->filename);
return -EINVAL;
}
if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
err = addr_filter__entire_dso(filt, dso);
goto put_dso;
}
if (filt->sym_from) {
err = find_dso_sym(dso, filt->sym_from, &start, &size,
filt->sym_from_idx);
if (err)
goto put_dso;
filt->addr = start;
if (filt->range && !filt->size && !filt->sym_to)
filt->size = size;
}
if (filt->sym_to) {
err = find_dso_sym(dso, filt->sym_to, &start, &size,
filt->sym_to_idx);
if (err)
goto put_dso;
err = check_end_after_start(filt, start, size);
if (err)
return err;
filt->size = start + size - filt->addr;
}
put_dso:
dso__put(dso);
return err;
}
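/*
 * Convert a resolved filter back into the kernel filter string format:
 * "<action> 0x<addr>[/0x<size>][@<filename>]", with the filename expanded
 * to a real path.
 */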
static char *addr_filter__to_str(struct addr_filter *filt)
{
char filename_buf[PATH_MAX];
const char *at = "";
const char *fn = "";
char *filter;
int err;
if (filt->filename) {
at = "@";
fn = realpath(filt->filename, filename_buf);
if (!fn)
return NULL;
}
if (filt->range) {
err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
filt->action, filt->addr, filt->size, at, fn);
} else {
err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
filt->action, filt->addr, at, fn);
}
return err < 0 ? NULL : filter;
}
static int parse_addr_filter(struct evsel *evsel, const char *filter,
int max_nr)
{
struct addr_filters filts;
struct addr_filter *filt;
int err;
addr_filters__init(&filts);
err = addr_filters__parse_bare_filter(&filts, filter);
if (err)
goto out_exit;
if (filts.cnt > max_nr) {
pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
filts.cnt, max_nr);
err = -EINVAL;
goto out_exit;
}
list_for_each_entry(filt, &filts.head, list) {
char *new_filter;
err = addr_filter__resolve_syms(filt);
if (err)
goto out_exit;
new_filter = addr_filter__to_str(filt);
if (!new_filter) {
err = -ENOMEM;
goto out_exit;
}
if (evsel__append_addr_filter(evsel, new_filter)) {
err = -ENOMEM;
goto out_exit;
}
}
out_exit:
addr_filters__exit(&filts);
if (err) {
pr_err("Failed to parse address filter: '%s'\n", filter);
pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
pr_err("Where multiple filters are separated by space or comma.\n");
}
return err;
}
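/*
 * Read the number of address filters the evsel's PMU supports from the
 * PMU's "nr_addr_filters" sysfs file.
 */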
static int evsel__nr_addr_filter(struct evsel *evsel)
{
struct perf_pmu *pmu = evsel__find_pmu(evsel);
int nr_addr_filters = 0;
if (!pmu)
return 0;
perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);
return nr_addr_filters;
}
int auxtrace_parse_filters(struct evlist *evlist)
{
struct evsel *evsel;
char *filter;
int err, max_nr;
evlist__for_each_entry(evlist, evsel) {
filter = evsel->filter;
max_nr = evsel__nr_addr_filter(evsel);
if (!filter || !max_nr)
continue;
evsel->filter = NULL;
err = parse_addr_filter(evsel, filter, max_nr);
free(filter);
if (err)
return err;
pr_debug("Address filter: %s\n", evsel->filter);
}
return 0;
}
int auxtrace__process_event(struct perf_session *session, union perf_event *event,
struct perf_sample *sample, struct perf_tool *tool)
{
if (!session->auxtrace)
return 0;
return session->auxtrace->process_event(session, event, sample, tool);
}
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
struct perf_sample *sample)
{
if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
auxtrace__dont_decode(session))
return;
session->auxtrace->dump_auxtrace_sample(session, sample);
}
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
{
if (!session->auxtrace)
return 0;
return session->auxtrace->flush_events(session, tool);
}
void auxtrace__free_events(struct perf_session *session)
{
if (!session->auxtrace)
return;
return session->auxtrace->free_events(session);
}
void auxtrace__free(struct perf_session *session)
{
if (!session->auxtrace)
return;
return session->auxtrace->free(session);
}
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
struct evsel *evsel)
{
if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
return false;
return session->auxtrace->evsel_is_auxtrace(session, evsel);
}
| linux-master | tools/perf/util/auxtrace.c |
// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/event.h"
#include <subcmd/parse-options.h>
#include "util/parse-branch-options.h"
#include <stdlib.h>
#include <string.h>
#define BRANCH_OPT(n, m) \
{ .name = n, .mode = (m) }
#define BRANCH_END { .name = NULL }
struct branch_mode {
const char *name;
int mode;
};
static const struct branch_mode branch_modes[] = {
BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
BRANCH_OPT("ind_jmp", PERF_SAMPLE_BRANCH_IND_JUMP),
BRANCH_OPT("call", PERF_SAMPLE_BRANCH_CALL),
BRANCH_OPT("no_flags", PERF_SAMPLE_BRANCH_NO_FLAGS),
BRANCH_OPT("no_cycles", PERF_SAMPLE_BRANCH_NO_CYCLES),
BRANCH_OPT("save_type", PERF_SAMPLE_BRANCH_TYPE_SAVE),
BRANCH_OPT("stack", PERF_SAMPLE_BRANCH_CALL_STACK),
BRANCH_OPT("hw_index", PERF_SAMPLE_BRANCH_HW_INDEX),
BRANCH_OPT("priv", PERF_SAMPLE_BRANCH_PRIV_SAVE),
BRANCH_END
};
int parse_branch_str(const char *str, __u64 *mode)
{
#define ONLY_PLM \
(PERF_SAMPLE_BRANCH_USER |\
PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
int ret = 0;
char *p, *s;
char *os = NULL;
const struct branch_mode *br;
if (str == NULL) {
*mode = PERF_SAMPLE_BRANCH_ANY;
return 0;
}
/* because str is read-only */
s = os = strdup(str);
if (!s)
return -1;
for (;;) {
p = strchr(s, ',');
if (p)
*p = '\0';
for (br = branch_modes; br->name; br++) {
if (!strcasecmp(s, br->name))
break;
}
if (!br->name) {
ret = -1;
pr_warning("unknown branch filter %s,"
" check man page\n", s);
goto error;
}
*mode |= br->mode;
if (!p)
break;
s = p + 1;
}
/* default to any branch */
if ((*mode & ~ONLY_PLM) == 0) {
*mode = PERF_SAMPLE_BRANCH_ANY;
}
error:
free(os);
return ret;
}
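/*
 * Usage sketch (illustrative, assuming a zero-initialized mode as in
 * parse_branch_stack() below):
 *
 *	__u64 mode = 0;
 *
 *	if (!parse_branch_str("any_call,u", &mode))
 *		assert(mode == (PERF_SAMPLE_BRANCH_ANY_CALL |
 *				PERF_SAMPLE_BRANCH_USER));
 *
 * A NULL string, or a result containing only privilege-level bits, falls
 * back to PERF_SAMPLE_BRANCH_ANY.
 */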
int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
__u64 *mode = (__u64 *)opt->value;
if (unset)
return 0;
/*
	 * Cannot be set twice: -b + --branch-filter, for instance.
*/
if (*mode) {
pr_err("Error: Can't use --branch-any (-b) with --branch-filter (-j).\n");
return -1;
}
return parse_branch_str(str, mode);
}
| linux-master | tools/perf/util/parse-branch-options.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/zalloc.h>
#include "values.h"
#include "debug.h"
int perf_read_values_init(struct perf_read_values *values)
{
values->threads_max = 16;
values->pid = malloc(values->threads_max * sizeof(*values->pid));
values->tid = malloc(values->threads_max * sizeof(*values->tid));
values->value = zalloc(values->threads_max * sizeof(*values->value));
if (!values->pid || !values->tid || !values->value) {
pr_debug("failed to allocate read_values threads arrays");
goto out_free_pid;
}
values->threads = 0;
values->counters_max = 16;
values->counterrawid = malloc(values->counters_max
* sizeof(*values->counterrawid));
values->countername = malloc(values->counters_max
* sizeof(*values->countername));
if (!values->counterrawid || !values->countername) {
pr_debug("failed to allocate read_values counters arrays");
goto out_free_counter;
}
values->counters = 0;
return 0;
out_free_counter:
zfree(&values->counterrawid);
zfree(&values->countername);
out_free_pid:
zfree(&values->pid);
zfree(&values->tid);
zfree(&values->value);
return -ENOMEM;
}
void perf_read_values_destroy(struct perf_read_values *values)
{
int i;
if (!values->threads_max || !values->counters_max)
return;
for (i = 0; i < values->threads; i++)
zfree(&values->value[i]);
zfree(&values->value);
zfree(&values->pid);
zfree(&values->tid);
zfree(&values->counterrawid);
for (i = 0; i < values->counters; i++)
zfree(&values->countername[i]);
zfree(&values->countername);
}
static int perf_read_values__enlarge_threads(struct perf_read_values *values)
{
	int nthreads_max = values->threads_max * 2;
	void *npid, *ntid, *nvalue;

	/*
	 * Publish each array as soon as its realloc succeeds: freeing the
	 * new blocks on partial failure would leave the old (already freed)
	 * pointers behind and cause a double free in _destroy().
	 */
	npid = realloc(values->pid, nthreads_max * sizeof(*values->pid));
	if (!npid)
		goto out_err;
	values->pid = npid;

	ntid = realloc(values->tid, nthreads_max * sizeof(*values->tid));
	if (!ntid)
		goto out_err;
	values->tid = ntid;

	nvalue = realloc(values->value, nthreads_max * sizeof(*values->value));
	if (!nvalue)
		goto out_err;
	values->value = nvalue;

	values->threads_max = nthreads_max;
	return 0;
out_err:
	pr_debug("failed to enlarge read_values threads arrays");
	return -ENOMEM;
}
static int perf_read_values__findnew_thread(struct perf_read_values *values,
u32 pid, u32 tid)
{
int i;
for (i = 0; i < values->threads; i++)
if (values->pid[i] == pid && values->tid[i] == tid)
return i;
if (values->threads == values->threads_max) {
i = perf_read_values__enlarge_threads(values);
if (i < 0)
return i;
}
i = values->threads;
values->value[i] = zalloc(values->counters_max * sizeof(**values->value));
if (!values->value[i]) {
pr_debug("failed to allocate read_values counters array");
return -ENOMEM;
}
values->pid[i] = pid;
values->tid[i] = tid;
values->threads = i + 1;
return i;
}
static int perf_read_values__enlarge_counters(struct perf_read_values *values)
{
	char **countername;
	u64 *counterrawid;
	int i, counters_max = values->counters_max * 2;

	/*
	 * As in the threads case, publish each grown array immediately so
	 * that an error in a later step cannot leave a stale pointer that
	 * _destroy() would free a second time.
	 */
	counterrawid = realloc(values->counterrawid, counters_max * sizeof(*values->counterrawid));
	if (!counterrawid) {
		pr_debug("failed to enlarge read_values rawid array");
		return -ENOMEM;
	}
	values->counterrawid = counterrawid;

	countername = realloc(values->countername, counters_max * sizeof(*values->countername));
	if (!countername) {
		pr_debug("failed to enlarge read_values name array");
		return -ENOMEM;
	}
	values->countername = countername;

	for (i = 0; i < values->threads; i++) {
		u64 *value = realloc(values->value[i], counters_max * sizeof(**values->value));
		int j;

		if (!value) {
			pr_debug("failed to enlarge read_values ->values array");
			return -ENOMEM;
		}

		for (j = values->counters_max; j < counters_max; j++)
			value[j] = 0;

		values->value[i] = value;
	}

	values->counters_max = counters_max;
	return 0;
}
static int perf_read_values__findnew_counter(struct perf_read_values *values,
u64 rawid, const char *name)
{
int i;
for (i = 0; i < values->counters; i++)
if (values->counterrawid[i] == rawid)
return i;
if (values->counters == values->counters_max) {
i = perf_read_values__enlarge_counters(values);
if (i)
return i;
}
	i = values->counters;
	values->counterrawid[i] = rawid;
	values->countername[i] = strdup(name);
	if (!values->countername[i]) {
		pr_debug("failed to copy read_values counter name");
		return -ENOMEM;
	}
	values->counters = i + 1;
	return i;
}
int perf_read_values_add_value(struct perf_read_values *values,
u32 pid, u32 tid,
u64 rawid, const char *name, u64 value)
{
int tindex, cindex;
tindex = perf_read_values__findnew_thread(values, pid, tid);
if (tindex < 0)
return tindex;
cindex = perf_read_values__findnew_counter(values, rawid, name);
if (cindex < 0)
return cindex;
values->value[tindex][cindex] += value;
return 0;
}
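/*
 * Lifecycle sketch (illustrative, not part of the original source): a
 * typical consumer accumulates per-thread counter values and then renders
 * them, e.g.:
 *
 *	struct perf_read_values values;
 *
 *	if (!perf_read_values_init(&values)) {
 *		perf_read_values_add_value(&values, pid, tid, id,
 *					   "cycles", 1000);
 *		perf_read_values_add_value(&values, pid, tid, id,
 *					   "cycles", 500); // accumulates to 1500
 *		perf_read_values_display(stdout, &values, 0); // pretty mode
 *		perf_read_values_destroy(&values);
 *	}
 */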
static void perf_read_values__display_pretty(FILE *fp,
struct perf_read_values *values)
{
int i, j;
int pidwidth, tidwidth;
int *counterwidth;
counterwidth = malloc(values->counters * sizeof(*counterwidth));
if (!counterwidth) {
fprintf(fp, "INTERNAL ERROR: Failed to allocate counterwidth array\n");
return;
}
tidwidth = 3;
pidwidth = 3;
for (j = 0; j < values->counters; j++)
counterwidth[j] = strlen(values->countername[j]);
for (i = 0; i < values->threads; i++) {
int width;
width = snprintf(NULL, 0, "%d", values->pid[i]);
if (width > pidwidth)
pidwidth = width;
width = snprintf(NULL, 0, "%d", values->tid[i]);
if (width > tidwidth)
tidwidth = width;
for (j = 0; j < values->counters; j++) {
width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
if (width > counterwidth[j])
counterwidth[j] = width;
}
}
fprintf(fp, "# %*s %*s", pidwidth, "PID", tidwidth, "TID");
for (j = 0; j < values->counters; j++)
fprintf(fp, " %*s", counterwidth[j], values->countername[j]);
fprintf(fp, "\n");
for (i = 0; i < values->threads; i++) {
fprintf(fp, " %*d %*d", pidwidth, values->pid[i],
tidwidth, values->tid[i]);
for (j = 0; j < values->counters; j++)
fprintf(fp, " %*" PRIu64,
counterwidth[j], values->value[i][j]);
fprintf(fp, "\n");
}
free(counterwidth);
}
static void perf_read_values__display_raw(FILE *fp,
struct perf_read_values *values)
{
int width, pidwidth, tidwidth, namewidth, rawwidth, countwidth;
int i, j;
tidwidth = 3; /* TID */
pidwidth = 3; /* PID */
namewidth = 4; /* "Name" */
rawwidth = 3; /* "Raw" */
countwidth = 5; /* "Count" */
for (i = 0; i < values->threads; i++) {
width = snprintf(NULL, 0, "%d", values->pid[i]);
if (width > pidwidth)
pidwidth = width;
width = snprintf(NULL, 0, "%d", values->tid[i]);
if (width > tidwidth)
tidwidth = width;
}
for (j = 0; j < values->counters; j++) {
width = strlen(values->countername[j]);
if (width > namewidth)
namewidth = width;
width = snprintf(NULL, 0, "%" PRIx64, values->counterrawid[j]);
if (width > rawwidth)
rawwidth = width;
}
for (i = 0; i < values->threads; i++) {
for (j = 0; j < values->counters; j++) {
width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
if (width > countwidth)
countwidth = width;
}
}
fprintf(fp, "# %*s %*s %*s %*s %*s\n",
pidwidth, "PID", tidwidth, "TID",
namewidth, "Name", rawwidth, "Raw",
countwidth, "Count");
for (i = 0; i < values->threads; i++)
for (j = 0; j < values->counters; j++)
fprintf(fp, " %*d %*d %*s %*" PRIx64 " %*" PRIu64,
pidwidth, values->pid[i],
tidwidth, values->tid[i],
namewidth, values->countername[j],
rawwidth, values->counterrawid[j],
countwidth, values->value[i][j]);
}
void perf_read_values_display(FILE *fp, struct perf_read_values *values, int raw)
{
if (raw)
perf_read_values__display_raw(fp, values);
else
perf_read_values__display_pretty(fp, values);
}
| linux-master | tools/perf/util/values.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* trace-event-scripting. Scripting engine common and initialization code.
*
* Copyright (C) 2009-2010 Tom Zanussi <[email protected]>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include "debug.h"
#include "trace-event.h"
#include "evsel.h"
#include <linux/zalloc.h>
#include "util/sample.h"
struct scripting_context *scripting_context;
void scripting_context__update(struct scripting_context *c,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al,
struct addr_location *addr_al)
{
c->event_data = sample->raw_data;
c->pevent = NULL;
#ifdef HAVE_LIBTRACEEVENT
if (evsel->tp_format)
c->pevent = evsel->tp_format->tep;
#endif
c->event = event;
c->sample = sample;
c->evsel = evsel;
c->al = al;
c->addr_al = addr_al;
}
static int flush_script_unsupported(void)
{
return 0;
}
static int stop_script_unsupported(void)
{
return 0;
}
static void process_event_unsupported(union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct evsel *evsel __maybe_unused,
struct addr_location *al __maybe_unused,
struct addr_location *addr_al __maybe_unused)
{
}
static void print_python_unsupported_msg(void)
{
fprintf(stderr, "Python scripting not supported."
" Install libpython and rebuild perf to enable it.\n"
"For example:\n # apt-get install python-dev (ubuntu)"
"\n # yum install python-devel (Fedora)"
"\n etc.\n");
}
static int python_start_script_unsupported(const char *script __maybe_unused,
int argc __maybe_unused,
const char **argv __maybe_unused,
struct perf_session *session __maybe_unused)
{
print_python_unsupported_msg();
return -1;
}
static int python_generate_script_unsupported(struct tep_handle *pevent
__maybe_unused,
const char *outfile
__maybe_unused)
{
print_python_unsupported_msg();
return -1;
}
struct scripting_ops python_scripting_unsupported_ops = {
.name = "Python",
.dirname = "python",
.start_script = python_start_script_unsupported,
.flush_script = flush_script_unsupported,
.stop_script = stop_script_unsupported,
.process_event = process_event_unsupported,
.generate_script = python_generate_script_unsupported,
};
static void register_python_scripting(struct scripting_ops *scripting_ops)
{
if (scripting_context == NULL)
scripting_context = malloc(sizeof(*scripting_context));
if (scripting_context == NULL ||
script_spec_register("Python", scripting_ops) ||
script_spec_register("py", scripting_ops)) {
pr_err("Error registering Python script extension: disabling it\n");
zfree(&scripting_context);
}
}
#ifndef HAVE_LIBPYTHON_SUPPORT
void setup_python_scripting(void)
{
register_python_scripting(&python_scripting_unsupported_ops);
}
#else
extern struct scripting_ops python_scripting_ops;
void setup_python_scripting(void)
{
register_python_scripting(&python_scripting_ops);
}
#endif
#ifdef HAVE_LIBTRACEEVENT
static void print_perl_unsupported_msg(void)
{
fprintf(stderr, "Perl scripting not supported."
" Install libperl and rebuild perf to enable it.\n"
"For example:\n # apt-get install libperl-dev (ubuntu)"
"\n # yum install 'perl(ExtUtils::Embed)' (Fedora)"
"\n etc.\n");
}
static int perl_start_script_unsupported(const char *script __maybe_unused,
int argc __maybe_unused,
const char **argv __maybe_unused,
struct perf_session *session __maybe_unused)
{
print_perl_unsupported_msg();
return -1;
}
static int perl_generate_script_unsupported(struct tep_handle *pevent
__maybe_unused,
const char *outfile __maybe_unused)
{
print_perl_unsupported_msg();
return -1;
}
struct scripting_ops perl_scripting_unsupported_ops = {
.name = "Perl",
.dirname = "perl",
.start_script = perl_start_script_unsupported,
.flush_script = flush_script_unsupported,
.stop_script = stop_script_unsupported,
.process_event = process_event_unsupported,
.generate_script = perl_generate_script_unsupported,
};
static void register_perl_scripting(struct scripting_ops *scripting_ops)
{
if (scripting_context == NULL)
scripting_context = malloc(sizeof(*scripting_context));
if (scripting_context == NULL ||
script_spec_register("Perl", scripting_ops) ||
script_spec_register("pl", scripting_ops)) {
pr_err("Error registering Perl script extension: disabling it\n");
zfree(&scripting_context);
}
}
#ifndef HAVE_LIBPERL_SUPPORT
void setup_perl_scripting(void)
{
register_perl_scripting(&perl_scripting_unsupported_ops);
}
#else
extern struct scripting_ops perl_scripting_ops;
void setup_perl_scripting(void)
{
register_perl_scripting(&perl_scripting_ops);
}
#endif
#endif
| linux-master | tools/perf/util/trace-event-scripting.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <signal.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "tsc.h"
#include "ui/progress.h"
#include "util.h"
#include "arch/common.h"
#include "units.h"
#include <internal/lib.h>
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
union perf_event *event, u64 file_offset,
const char *file_path)
{
void *src;
size_t decomp_size, src_size;
u64 decomp_last_rem = 0;
size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
struct decomp *decomp, *decomp_last = session->active_decomp->decomp_last;
if (decomp_last) {
decomp_last_rem = decomp_last->size - decomp_last->head;
decomp_len += decomp_last_rem;
}
mmap_len = sizeof(struct decomp) + decomp_len;
decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
if (decomp == MAP_FAILED) {
pr_err("Couldn't allocate memory for decompression\n");
return -1;
}
decomp->file_pos = file_offset;
decomp->file_path = file_path;
decomp->mmap_len = mmap_len;
decomp->head = 0;
if (decomp_last_rem) {
memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
decomp->size = decomp_last_rem;
}
src = (void *)event + sizeof(struct perf_record_compressed);
src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
decomp_size = zstd_decompress_stream(session->active_decomp->zstd_decomp, src, src_size,
&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
if (!decomp_size) {
munmap(decomp, mmap_len);
pr_err("Couldn't decompress data\n");
return -1;
}
decomp->size += decomp_size;
if (session->active_decomp->decomp == NULL)
session->active_decomp->decomp = decomp;
else
session->active_decomp->decomp_last->next = decomp;
session->active_decomp->decomp_last = decomp;
pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
return 0;
}
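/*
 * Illustrative example of the carry-over logic above (not from the
 * original source): suppose the previous decompressed buffer held 100
 * bytes (size == 100) but event parsing consumed only 96 (head == 96).
 * The remaining 4 bytes are the start of an event whose tail is in the
 * next PERF_RECORD_COMPRESSED payload, so they are copied to the front of
 * the new buffer before decompressing into the space that follows. Events
 * therefore never straddle two decomp buffers.
 */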
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool,
u64 file_offset,
const char *file_path);
static int perf_session__open(struct perf_session *session, int repipe_fd)
{
struct perf_data *data = session->data;
if (perf_session__read_header(session, repipe_fd) < 0) {
pr_err("incompatible file format (rerun with -v to learn more)\n");
return -1;
}
if (perf_data__is_pipe(data))
return 0;
if (perf_header__has_feat(&session->header, HEADER_STAT))
return 0;
if (!evlist__valid_sample_type(session->evlist)) {
pr_err("non matching sample_type\n");
return -1;
}
if (!evlist__valid_sample_id_all(session->evlist)) {
pr_err("non matching sample_id_all\n");
return -1;
}
if (!evlist__valid_read_format(session->evlist)) {
pr_err("non matching read_format\n");
return -1;
}
return 0;
}
void perf_session__set_id_hdr_size(struct perf_session *session)
{
u16 id_hdr_size = evlist__id_hdr_size(session->evlist);
machines__set_id_hdr_size(&session->machines, id_hdr_size);
}
int perf_session__create_kernel_maps(struct perf_session *session)
{
int ret = machine__create_kernel_maps(&session->machines.host);
if (ret >= 0)
ret = machines__create_guest_kernel_maps(&session->machines);
return ret;
}
static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
machines__destroy_kernel_maps(&session->machines);
}
static bool perf_session__has_comm_exec(struct perf_session *session)
{
struct evsel *evsel;
evlist__for_each_entry(session->evlist, evsel) {
if (evsel->core.attr.comm_exec)
return true;
}
return false;
}
static void perf_session__set_comm_exec(struct perf_session *session)
{
bool comm_exec = perf_session__has_comm_exec(session);
machines__set_comm_exec(&session->machines, comm_exec);
}
static int ordered_events__deliver_event(struct ordered_events *oe,
struct ordered_event *event)
{
struct perf_session *session = container_of(oe, struct perf_session,
ordered_events);
return perf_session__deliver_event(session, event->event,
session->tool, event->file_offset,
event->file_path);
}
struct perf_session *__perf_session__new(struct perf_data *data,
bool repipe, int repipe_fd,
struct perf_tool *tool)
{
int ret = -ENOMEM;
struct perf_session *session = zalloc(sizeof(*session));
if (!session)
goto out;
session->repipe = repipe;
session->tool = tool;
session->decomp_data.zstd_decomp = &session->zstd_data;
session->active_decomp = &session->decomp_data;
INIT_LIST_HEAD(&session->auxtrace_index);
machines__init(&session->machines);
ordered_events__init(&session->ordered_events,
ordered_events__deliver_event, NULL);
perf_env__init(&session->header.env);
if (data) {
ret = perf_data__open(data);
if (ret < 0)
goto out_delete;
session->data = data;
if (perf_data__is_read(data)) {
ret = perf_session__open(session, repipe_fd);
if (ret < 0)
goto out_delete;
/*
* set session attributes that are present in perf.data
* but not in pipe-mode.
*/
if (!data->is_pipe) {
perf_session__set_id_hdr_size(session);
perf_session__set_comm_exec(session);
}
evlist__init_trace_event_sample_raw(session->evlist);
/* Open the directory data. */
if (data->is_dir) {
ret = perf_data__open_dir(data);
if (ret)
goto out_delete;
}
if (!symbol_conf.kallsyms_name &&
!symbol_conf.vmlinux_name)
symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
}
} else {
session->machines.host.env = &perf_env;
}
session->machines.host.single_address_space =
perf_env__single_address_space(session->machines.host.env);
if (!data || perf_data__is_write(data)) {
/*
* In O_RDONLY mode this will be performed when reading the
* kernel MMAP event, in perf_event__process_mmap().
*/
if (perf_session__create_kernel_maps(session) < 0)
pr_warning("Cannot read kernel map\n");
}
/*
* In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
* processed, so evlist__sample_id_all is not meaningful here.
*/
if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
tool->ordered_events = false;
}
return session;
out_delete:
perf_session__delete(session);
out:
return ERR_PTR(ret);
}
static void perf_decomp__release_events(struct decomp *next)
{
struct decomp *decomp;
size_t mmap_len;
do {
decomp = next;
if (decomp == NULL)
break;
next = decomp->next;
mmap_len = decomp->mmap_len;
munmap(decomp, mmap_len);
} while (1);
}
void perf_session__delete(struct perf_session *session)
{
if (session == NULL)
return;
auxtrace__free(session);
auxtrace_index__free(&session->auxtrace_index);
perf_session__destroy_kernel_maps(session);
perf_decomp__release_events(session->decomp_data.decomp);
perf_env__exit(&session->header.env);
machines__exit(&session->machines);
if (session->data) {
if (perf_data__is_read(session->data))
evlist__delete(session->evlist);
perf_data__close(session->data);
}
#ifdef HAVE_LIBTRACEEVENT
trace_event__cleanup(&session->tevent);
#endif
free(session);
}
static int process_event_synth_tracing_data_stub(struct perf_session *session
__maybe_unused,
union perf_event *event
__maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct evlist **pevlist
__maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct evlist **pevlist
__maybe_unused)
{
if (dump_trace)
perf_event__fprintf_event_update(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct evsel *evsel __maybe_unused,
struct machine *machine __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct ordered_events *oe __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int skipn(int fd, off_t n)
{
char buf[4096];
ssize_t ret;
while (n > 0) {
ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
if (ret <= 0)
return ret;
n -= ret;
}
return 0;
}
static s64 process_event_auxtrace_stub(struct perf_session *session,
union perf_event *event)
{
dump_printf(": unhandled!\n");
if (perf_data__is_pipe(session->data))
skipn(perf_data__fd(session->data), event->auxtrace.size);
return event->auxtrace.size;
}
static int process_event_op2_stub(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_thread_map(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_cpu_map(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_stat_config(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_stat(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_stat_round(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_time_conv(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused,
u64 file_offset __maybe_unused,
const char *file_path __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
void perf_tool__fill_defaults(struct perf_tool *tool)
{
if (tool->sample == NULL)
tool->sample = process_event_sample_stub;
if (tool->mmap == NULL)
tool->mmap = process_event_stub;
if (tool->mmap2 == NULL)
tool->mmap2 = process_event_stub;
if (tool->comm == NULL)
tool->comm = process_event_stub;
if (tool->namespaces == NULL)
tool->namespaces = process_event_stub;
if (tool->cgroup == NULL)
tool->cgroup = process_event_stub;
if (tool->fork == NULL)
tool->fork = process_event_stub;
if (tool->exit == NULL)
tool->exit = process_event_stub;
if (tool->lost == NULL)
tool->lost = perf_event__process_lost;
if (tool->lost_samples == NULL)
tool->lost_samples = perf_event__process_lost_samples;
if (tool->aux == NULL)
tool->aux = perf_event__process_aux;
if (tool->itrace_start == NULL)
tool->itrace_start = perf_event__process_itrace_start;
if (tool->context_switch == NULL)
tool->context_switch = perf_event__process_switch;
if (tool->ksymbol == NULL)
tool->ksymbol = perf_event__process_ksymbol;
if (tool->bpf == NULL)
tool->bpf = perf_event__process_bpf;
if (tool->text_poke == NULL)
tool->text_poke = perf_event__process_text_poke;
if (tool->aux_output_hw_id == NULL)
tool->aux_output_hw_id = perf_event__process_aux_output_hw_id;
if (tool->read == NULL)
tool->read = process_event_sample_stub;
if (tool->throttle == NULL)
tool->throttle = process_event_stub;
if (tool->unthrottle == NULL)
tool->unthrottle = process_event_stub;
if (tool->attr == NULL)
tool->attr = process_event_synth_attr_stub;
if (tool->event_update == NULL)
tool->event_update = process_event_synth_event_update_stub;
if (tool->tracing_data == NULL)
tool->tracing_data = process_event_synth_tracing_data_stub;
if (tool->build_id == NULL)
tool->build_id = process_event_op2_stub;
if (tool->finished_round == NULL) {
if (tool->ordered_events)
tool->finished_round = perf_event__process_finished_round;
else
tool->finished_round = process_finished_round_stub;
}
if (tool->id_index == NULL)
tool->id_index = process_event_op2_stub;
if (tool->auxtrace_info == NULL)
tool->auxtrace_info = process_event_op2_stub;
if (tool->auxtrace == NULL)
tool->auxtrace = process_event_auxtrace_stub;
if (tool->auxtrace_error == NULL)
tool->auxtrace_error = process_event_op2_stub;
if (tool->thread_map == NULL)
tool->thread_map = process_event_thread_map_stub;
if (tool->cpu_map == NULL)
tool->cpu_map = process_event_cpu_map_stub;
if (tool->stat_config == NULL)
tool->stat_config = process_event_stat_config_stub;
if (tool->stat == NULL)
tool->stat = process_stat_stub;
if (tool->stat_round == NULL)
tool->stat_round = process_stat_round_stub;
if (tool->time_conv == NULL)
tool->time_conv = process_event_time_conv_stub;
if (tool->feature == NULL)
tool->feature = process_event_op2_stub;
if (tool->compressed == NULL)
tool->compressed = perf_session__process_compressed_event;
if (tool->finished_init == NULL)
tool->finished_init = process_event_op2_stub;
}
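/*
 * Usage sketch (illustrative, not from the original source): a minimal
 * consumer only has to provide the callbacks it cares about and let the
 * stubs absorb the rest, e.g.:
 *
 *	struct perf_tool tool = {
 *		.sample		= my_process_sample,
 *		.ordered_events	= true,
 *	};
 *
 *	perf_tool__fill_defaults(&tool);
 *
 * after which every unset callback points at one of the stubs above and
 * .finished_round performs ordered flushing.
 */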
static void swap_sample_id_all(union perf_event *event, void *data)
{
void *end = (void *) event + event->header.size;
int size = end - data;
BUG_ON(size % sizeof(u64));
mem_bswap_64(data, size);
}
static void perf_event__all64_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
struct perf_event_header *hdr = &event->header;
mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
event->comm.pid = bswap_32(event->comm.pid);
event->comm.tid = bswap_32(event->comm.tid);
if (sample_id_all) {
void *data = &event->comm.comm;
data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
}
static void perf_event__mmap_swap(union perf_event *event,
bool sample_id_all)
{
event->mmap.pid = bswap_32(event->mmap.pid);
event->mmap.tid = bswap_32(event->mmap.tid);
event->mmap.start = bswap_64(event->mmap.start);
event->mmap.len = bswap_64(event->mmap.len);
event->mmap.pgoff = bswap_64(event->mmap.pgoff);
if (sample_id_all) {
void *data = &event->mmap.filename;
data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
}
static void perf_event__mmap2_swap(union perf_event *event,
bool sample_id_all)
{
event->mmap2.pid = bswap_32(event->mmap2.pid);
event->mmap2.tid = bswap_32(event->mmap2.tid);
event->mmap2.start = bswap_64(event->mmap2.start);
event->mmap2.len = bswap_64(event->mmap2.len);
event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
event->mmap2.maj = bswap_32(event->mmap2.maj);
event->mmap2.min = bswap_32(event->mmap2.min);
event->mmap2.ino = bswap_64(event->mmap2.ino);
event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
}
if (sample_id_all) {
void *data = &event->mmap2.filename;
data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
event->fork.pid = bswap_32(event->fork.pid);
event->fork.tid = bswap_32(event->fork.tid);
event->fork.ppid = bswap_32(event->fork.ppid);
event->fork.ptid = bswap_32(event->fork.ptid);
event->fork.time = bswap_64(event->fork.time);
if (sample_id_all)
swap_sample_id_all(event, &event->fork + 1);
}
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
event->read.pid = bswap_32(event->read.pid);
event->read.tid = bswap_32(event->read.tid);
event->read.value = bswap_64(event->read.value);
event->read.time_enabled = bswap_64(event->read.time_enabled);
event->read.time_running = bswap_64(event->read.time_running);
event->read.id = bswap_64(event->read.id);
if (sample_id_all)
swap_sample_id_all(event, &event->read + 1);
}
static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
event->aux.aux_offset = bswap_64(event->aux.aux_offset);
event->aux.aux_size = bswap_64(event->aux.aux_size);
event->aux.flags = bswap_64(event->aux.flags);
if (sample_id_all)
swap_sample_id_all(event, &event->aux + 1);
}
static void perf_event__itrace_start_swap(union perf_event *event,
bool sample_id_all)
{
event->itrace_start.pid = bswap_32(event->itrace_start.pid);
event->itrace_start.tid = bswap_32(event->itrace_start.tid);
if (sample_id_all)
swap_sample_id_all(event, &event->itrace_start + 1);
}
static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
event->context_switch.next_prev_pid =
bswap_32(event->context_switch.next_prev_pid);
event->context_switch.next_prev_tid =
bswap_32(event->context_switch.next_prev_tid);
}
if (sample_id_all)
swap_sample_id_all(event, &event->context_switch + 1);
}
static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
event->text_poke.addr = bswap_64(event->text_poke.addr);
event->text_poke.old_len = bswap_16(event->text_poke.old_len);
event->text_poke.new_len = bswap_16(event->text_poke.new_len);
if (sample_id_all) {
size_t len = sizeof(event->text_poke.old_len) +
sizeof(event->text_poke.new_len) +
event->text_poke.old_len +
event->text_poke.new_len;
void *data = &event->text_poke.old_len;
data += PERF_ALIGN(len, sizeof(u64));
swap_sample_id_all(event, data);
}
}
static void perf_event__throttle_swap(union perf_event *event,
bool sample_id_all)
{
event->throttle.time = bswap_64(event->throttle.time);
event->throttle.id = bswap_64(event->throttle.id);
event->throttle.stream_id = bswap_64(event->throttle.stream_id);
if (sample_id_all)
swap_sample_id_all(event, &event->throttle + 1);
}
static void perf_event__namespaces_swap(union perf_event *event,
bool sample_id_all)
{
u64 i;
event->namespaces.pid = bswap_32(event->namespaces.pid);
event->namespaces.tid = bswap_32(event->namespaces.tid);
event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);
for (i = 0; i < event->namespaces.nr_namespaces; i++) {
struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
ns->dev = bswap_64(ns->dev);
ns->ino = bswap_64(ns->ino);
}
if (sample_id_all)
swap_sample_id_all(event, &event->namespaces.link_info[i]);
}
static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
event->cgroup.id = bswap_64(event->cgroup.id);
if (sample_id_all) {
void *data = &event->cgroup.path;
data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
}
static u8 revbyte(u8 b)
{
int rev = (b >> 4) | ((b & 0xf) << 4);
rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
return (u8) rev;
}
/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and should carry the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
unsigned i;
for (i = 0; i < len; i++) {
*p = revbyte(*p);
p++;
}
}
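/*
 * Worked example (illustrative): revbyte() mirrors a byte around its
 * centre, so a flag stored in bit 0 by a little-endian writer ends up in
 * bit 7, where a big-endian reader of the same bitfield expects it:
 *
 *	revbyte(0x01) == 0x80
 *	revbyte(0xc0) == 0x03
 *	revbyte(0xa5) == 0xa5	(palindromic bit pattern)
 */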
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
attr->type = bswap_32(attr->type);
attr->size = bswap_32(attr->size);
#define bswap_safe(f, n) \
(attr->size > (offsetof(struct perf_event_attr, f) + \
sizeof(attr->f) * (n)))
#define bswap_field(f, sz) \
do { \
if (bswap_safe(f, 0)) \
attr->f = bswap_##sz(attr->f); \
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)
bswap_field_64(config);
bswap_field_64(sample_period);
bswap_field_64(sample_type);
bswap_field_64(read_format);
bswap_field_32(wakeup_events);
bswap_field_32(bp_type);
bswap_field_64(bp_addr);
bswap_field_64(bp_len);
bswap_field_64(branch_sample_type);
bswap_field_64(sample_regs_user);
bswap_field_32(sample_stack_user);
bswap_field_32(aux_watermark);
bswap_field_16(sample_max_stack);
bswap_field_32(aux_sample_size);
/*
* After read_format are bitfields. Check read_format because
* we are unable to use offsetof on bitfield.
*/
if (bswap_safe(read_format, 1))
swap_bitfield((u8 *) (&attr->read_format + 1),
sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
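/*
 * Example of the size guard above (a sketch, not from the original
 * source): a perf.data file written by an old tool may carry a shorter
 * perf_event_attr, say attr->size == 64 (the originally published size).
 * Fields such as aux_watermark sit beyond that offset, so
 * bswap_safe(aux_watermark, 0) evaluates to false and the field is left
 * untouched rather than swapping bytes that were never written.
 */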
static void perf_event__hdr_attr_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
size_t size;
perf_event__attr_swap(&event->attr.attr);
size = event->header.size;
size -= perf_record_header_attr_id(event) - (void *)event;
mem_bswap_64(perf_record_header_attr_id(event), size);
}
static void perf_event__event_update_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->event_update.type = bswap_64(event->event_update.type);
event->event_update.id = bswap_64(event->event_update.id);
}
static void perf_event__event_type_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->event_type.event_type.event_id =
bswap_64(event->event_type.event_type.event_id);
}
static void perf_event__tracing_data_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->tracing_data.size = bswap_32(event->tracing_data.size);
}
static void perf_event__auxtrace_info_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
size_t size;
event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
size = event->header.size;
size -= (void *)&event->auxtrace_info.priv - (void *)event;
mem_bswap_64(event->auxtrace_info.priv, size);
}
static void perf_event__auxtrace_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->auxtrace.size = bswap_64(event->auxtrace.size);
event->auxtrace.offset = bswap_64(event->auxtrace.offset);
event->auxtrace.reference = bswap_64(event->auxtrace.reference);
event->auxtrace.idx = bswap_32(event->auxtrace.idx);
event->auxtrace.tid = bswap_32(event->auxtrace.tid);
event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}
static void perf_event__auxtrace_error_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
if (event->auxtrace_error.fmt)
event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
if (event->auxtrace_error.fmt >= 2) {
event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
}
}
static void perf_event__thread_map_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
unsigned i;
event->thread_map.nr = bswap_64(event->thread_map.nr);
for (i = 0; i < event->thread_map.nr; i++)
event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}
static void perf_event__cpu_map_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
struct perf_record_cpu_map_data *data = &event->cpu_map.data;
data->type = bswap_16(data->type);
switch (data->type) {
case PERF_CPU_MAP__CPUS:
data->cpus_data.nr = bswap_16(data->cpus_data.nr);
for (unsigned i = 0; i < data->cpus_data.nr; i++)
data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
break;
case PERF_CPU_MAP__MASK:
data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);
switch (data->mask32_data.long_size) {
case 4:
data->mask32_data.nr = bswap_16(data->mask32_data.nr);
for (unsigned i = 0; i < data->mask32_data.nr; i++)
data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
break;
case 8:
data->mask64_data.nr = bswap_16(data->mask64_data.nr);
for (unsigned i = 0; i < data->mask64_data.nr; i++)
data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
break;
default:
pr_err("cpu_map swap: unsupported long size\n");
}
break;
case PERF_CPU_MAP__RANGE_CPUS:
data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu);
data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu);
break;
default:
break;
}
}
static void perf_event__stat_config_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
u64 size;
size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
size += 1; /* nr item itself */
mem_bswap_64(&event->stat_config.nr, size);
}
static void perf_event__stat_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->stat.id = bswap_64(event->stat.id);
event->stat.thread = bswap_32(event->stat.thread);
event->stat.cpu = bswap_32(event->stat.cpu);
event->stat.val = bswap_64(event->stat.val);
event->stat.ena = bswap_64(event->stat.ena);
event->stat.run = bswap_64(event->stat.run);
}
static void perf_event__stat_round_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->stat_round.type = bswap_64(event->stat_round.type);
event->stat_round.time = bswap_64(event->stat_round.time);
}
static void perf_event__time_conv_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);
if (event_contains(event->time_conv, time_cycles)) {
event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
}
}
typedef void (*perf_event__swap_op)(union perf_event *event,
bool sample_id_all);
static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_MMAP] = perf_event__mmap_swap,
[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
[PERF_RECORD_COMM] = perf_event__comm_swap,
[PERF_RECORD_FORK] = perf_event__task_swap,
[PERF_RECORD_EXIT] = perf_event__task_swap,
[PERF_RECORD_LOST] = perf_event__all64_swap,
[PERF_RECORD_READ] = perf_event__read_swap,
[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
[PERF_RECORD_AUX] = perf_event__aux_swap,
[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
[PERF_RECORD_SWITCH] = perf_event__switch_swap,
[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
[PERF_RECORD_CGROUP] = perf_event__cgroup_swap,
[PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap,
[PERF_RECORD_AUX_OUTPUT_HW_ID] = perf_event__all64_swap,
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
[PERF_RECORD_HEADER_BUILD_ID] = NULL,
[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
[PERF_RECORD_STAT] = perf_event__stat_swap,
[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
[PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap,
[PERF_RECORD_HEADER_MAX] = NULL,
};
/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
*
* ============ PASS n =================
* CPU 0 | CPU 1
* |
* cnt1 timestamps | cnt2 timestamps
* 1 | 2
* 2 | 3
* - | 4 <--- max recorded
*
* ============ PASS n + 1 ==============
* CPU 0 | CPU 1
* |
* cnt1 timestamps | cnt2 timestamps
* 3 | 5
* 4 | 6
* 5 | 7 <---- max recorded
*
 * Flush all events below timestamp 4
*
* ============ PASS n + 2 ==============
* CPU 0 | CPU 1
* |
* cnt1 timestamps | cnt2 timestamps
* 6 | 8
* 7 | 9
* - | 10
*
 * Flush all events below timestamp 7
* etc...
*/
int perf_event__process_finished_round(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct ordered_events *oe)
{
if (dump_trace)
fprintf(stdout, "\n");
return ordered_events__flush(oe, OE_FLUSH__ROUND);
}
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
u64 timestamp, u64 file_offset, const char *file_path)
{
return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path);
}
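/*
 * Usage sketch (hypothetical caller, error handling elided): the reader
 * path decides between ordered queueing and direct delivery roughly like:
 *
 *	if (tool->ordered_events)
 *		err = perf_session__queue_event(session, event, sample.time,
 *						offset, path);
 *	else
 *		err = perf_session__deliver_event(session, event, tool,
 *						  offset, path);
 *
 * Queued events are later handed to ordered_events__deliver_event() once a
 * flush point (such as a finished round) is reached.
 */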
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
struct ip_callchain *callchain = sample->callchain;
struct branch_stack *lbr_stack = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
u64 kernel_callchain_nr = callchain->nr;
unsigned int i;
for (i = 0; i < kernel_callchain_nr; i++) {
if (callchain->ips[i] == PERF_CONTEXT_USER)
break;
}
if ((i != kernel_callchain_nr) && lbr_stack->nr) {
u64 total_nr;
/*
		 * The LBR callstack can only capture the user call chain;
		 * i is the kernel call chain length and the extra 1 is
		 * for PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBRs are paired registers: the caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, given the call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will be recorded as
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
*/
total_nr = i + 1 + lbr_stack->nr + 1;
kernel_callchain_nr = i + 1;
printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
for (i = 0; i < kernel_callchain_nr; i++)
printf("..... %2d: %016" PRIx64 "\n",
i, callchain->ips[i]);
printf("..... %2d: %016" PRIx64 "\n",
(int)(kernel_callchain_nr), entries[0].to);
for (i = 0; i < lbr_stack->nr; i++)
printf("..... %2d: %016" PRIx64 "\n",
(int)(i + kernel_callchain_nr + 1), entries[i].from);
}
}
static void callchain__printf(struct evsel *evsel,
struct perf_sample *sample)
{
unsigned int i;
struct ip_callchain *callchain = sample->callchain;
if (evsel__has_branch_callstack(evsel))
callchain__lbr_callstack_printf(sample);
printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
for (i = 0; i < callchain->nr; i++)
printf("..... %2d: %016" PRIx64 "\n",
i, callchain->ips[i]);
}
static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
struct branch_entry *entries = perf_sample__branch_entries(sample);
uint64_t i;
if (!callstack) {
printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr);
} else {
		/*
		 * The reason for adding 1 to nr is that expanding the
		 * branch stack generates nr + 1 callstack records, e.g.:
* B()->C()
* A()->B()
* the final callstack should be:
* C()
* B()
* A()
*/
printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr+1);
}
for (i = 0; i < sample->branch_stack->nr; i++) {
struct branch_entry *e = &entries[i];
if (!callstack) {
printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s %s\n",
i, e->from, e->to,
(unsigned short)e->flags.cycles,
e->flags.mispred ? "M" : " ",
e->flags.predicted ? "P" : " ",
e->flags.abort ? "A" : " ",
e->flags.in_tx ? "T" : " ",
(unsigned)e->flags.reserved,
get_branch_type(e),
e->flags.spec ? branch_spec_desc(e->flags.spec) : "");
} else {
if (i == 0) {
printf("..... %2"PRIu64": %016" PRIx64 "\n"
"..... %2"PRIu64": %016" PRIx64 "\n",
i, e->to, i+1, e->from);
} else {
printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from);
}
}
}
}
static void regs_dump__printf(u64 mask, u64 *regs, const char *arch)
{
unsigned rid, i = 0;
for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs[i++];
printf(".... %-5s 0x%016" PRIx64 "\n",
perf_reg_name(rid, arch), val);
}
}
static const char *regs_abi[] = {
[PERF_SAMPLE_REGS_ABI_NONE] = "none",
[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};
static inline const char *regs_dump_abi(struct regs_dump *d)
{
if (d->abi > PERF_SAMPLE_REGS_ABI_64)
return "unknown";
return regs_abi[d->abi];
}
static void regs__printf(const char *type, struct regs_dump *regs, const char *arch)
{
u64 mask = regs->mask;
printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
type,
mask,
regs_dump_abi(regs));
regs_dump__printf(mask, regs->regs, arch);
}
static void regs_user__printf(struct perf_sample *sample, const char *arch)
{
struct regs_dump *user_regs = &sample->user_regs;
if (user_regs->regs)
regs__printf("user", user_regs, arch);
}
static void regs_intr__printf(struct perf_sample *sample, const char *arch)
{
struct regs_dump *intr_regs = &sample->intr_regs;
if (intr_regs->regs)
regs__printf("intr", intr_regs, arch);
}
static void stack_user__printf(struct stack_dump *dump)
{
printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
dump->size, dump->offset);
}
static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
u64 sample_type = __evlist__combined_sample_type(evlist);
if (event->header.type != PERF_RECORD_SAMPLE &&
!evlist__sample_id_all(evlist)) {
fputs("-1 -1 ", stdout);
return;
}
if ((sample_type & PERF_SAMPLE_CPU))
printf("%u ", sample->cpu);
if (sample_type & PERF_SAMPLE_TIME)
printf("%" PRIu64 " ", sample->time);
}
static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
printf("... sample_read:\n");
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
printf("...... time enabled %016" PRIx64 "\n",
sample->read.time_enabled);
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
printf("...... time running %016" PRIx64 "\n",
sample->read.time_running);
if (read_format & PERF_FORMAT_GROUP) {
struct sample_read_value *value = sample->read.group.values;
printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
sample_read_group__for_each(value, sample->read.group.nr, read_format) {
printf("..... id %016" PRIx64
", value %016" PRIx64,
value->id, value->value);
if (read_format & PERF_FORMAT_LOST)
printf(", lost %" PRIu64, value->lost);
printf("\n");
}
} else {
printf("..... id %016" PRIx64 ", value %016" PRIx64,
sample->read.one.id, sample->read.one.value);
if (read_format & PERF_FORMAT_LOST)
printf(", lost %" PRIu64, sample->read.one.lost);
printf("\n");
}
}
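/*
 * Layout sketch (an assumption based on the read_format ABI, not from the
 * original source): with PERF_FORMAT_GROUP the sample's read area is laid
 * out as
 *
 *	u64 nr;
 *	u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	struct { u64 value, id, lost; } cntr[nr];	// id/lost optional
 *
 * which is why sample_read_group__for_each() needs read_format to know the
 * stride of each entry.
 */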
static void dump_event(struct evlist *evlist, union perf_event *event,
u64 file_offset, struct perf_sample *sample,
const char *file_path)
{
if (!dump_trace)
return;
printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n",
file_offset, file_path, event->header.size, event->header.type);
trace_event(event);
if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
evlist->trace_event_sample_raw(evlist, event, sample);
if (sample)
evlist__print_tstamp(evlist, event, sample);
printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
event->header.size, perf_event__name(event->header.type));
}
char *get_page_size_name(u64 size, char *str)
{
if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");
return str;
}
static void dump_sample(struct evsel *evsel, union perf_event *event,
struct perf_sample *sample, const char *arch)
{
u64 sample_type;
char str[PAGE_SIZE_NAME_LEN];
if (!dump_trace)
return;
printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
event->header.misc, sample->pid, sample->tid, sample->ip,
sample->period, sample->addr);
sample_type = evsel->core.attr.sample_type;
if (evsel__has_callchain(evsel))
callchain__printf(evsel, sample);
if (evsel__has_br_stack(evsel))
branch_stack__printf(sample, evsel__has_branch_callstack(evsel));
if (sample_type & PERF_SAMPLE_REGS_USER)
regs_user__printf(sample, arch);
if (sample_type & PERF_SAMPLE_REGS_INTR)
regs_intr__printf(sample, arch);
if (sample_type & PERF_SAMPLE_STACK_USER)
stack_user__printf(&sample->user_stack);
if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
printf("... weight: %" PRIu64 "", sample->weight);
if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
printf(",0x%"PRIx16"", sample->ins_lat);
printf(",0x%"PRIx16"", sample->p_stage_cyc);
}
printf("\n");
}
if (sample_type & PERF_SAMPLE_DATA_SRC)
printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
if (sample_type & PERF_SAMPLE_PHYS_ADDR)
printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));
if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));
if (sample_type & PERF_SAMPLE_TRANSACTION)
printf("... transaction: %" PRIx64 "\n", sample->transaction);
if (sample_type & PERF_SAMPLE_READ)
sample_read__printf(sample, evsel->core.attr.read_format);
}
static void dump_read(struct evsel *evsel, union perf_event *event)
{
struct perf_record_read *read_event = &event->read;
u64 read_format;
if (!dump_trace)
return;
printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
evsel__name(evsel), event->read.value);
if (!evsel)
return;
read_format = evsel->core.attr.read_format;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
printf("... time running : %" PRI_lu64 "\n", read_event->time_running);
if (read_format & PERF_FORMAT_ID)
printf("... id : %" PRI_lu64 "\n", read_event->id);
if (read_format & PERF_FORMAT_LOST)
printf("... lost : %" PRI_lu64 "\n", read_event->lost);
}
static struct machine *machines__find_for_cpumode(struct machines *machines,
union perf_event *event,
struct perf_sample *sample)
{
if (perf_guest &&
((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
(sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
u32 pid;
if (sample->machine_pid)
pid = sample->machine_pid;
else if (event->header.type == PERF_RECORD_MMAP
|| event->header.type == PERF_RECORD_MMAP2)
pid = event->mmap.pid;
else
pid = sample->pid;
/*
* Guest code machine is created as needed and does not use
* DEFAULT_GUEST_KERNEL_ID.
*/
if (symbol_conf.guest_code)
return machines__findnew(machines, pid);
return machines__find_guest(machines, pid);
}
return &machines->host;
}
static int deliver_sample_value(struct evlist *evlist,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct sample_read_value *v,
struct machine *machine)
{
struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
struct evsel *evsel;
if (sid) {
sample->id = v->id;
sample->period = v->value - sid->period;
sid->period = v->value;
}
if (!sid || sid->evsel == NULL) {
++evlist->stats.nr_unknown_id;
return 0;
}
/*
	 * There's no reason to deliver a sample
	 * with a zero period, so bail out.
*/
if (!sample->period)
return 0;
evsel = container_of(sid->evsel, struct evsel, core);
return tool->sample(tool, event, sample, evsel, machine);
}
static int deliver_sample_group(struct evlist *evlist,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine,
u64 read_format)
{
int ret = -EINVAL;
struct sample_read_value *v = sample->read.group.values;
sample_read_group__for_each(v, sample->read.group.nr, read_format) {
ret = deliver_sample_value(evlist, tool, event, sample, v,
machine);
if (ret)
break;
}
return ret;
}
static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
union perf_event *event, struct perf_sample *sample,
struct evsel *evsel, struct machine *machine)
{
/* We know evsel != NULL. */
u64 sample_type = evsel->core.attr.sample_type;
u64 read_format = evsel->core.attr.read_format;
/* Standard sample delivery. */
if (!(sample_type & PERF_SAMPLE_READ))
return tool->sample(tool, event, sample, evsel, machine);
/* For PERF_SAMPLE_READ we have either single or group mode. */
if (read_format & PERF_FORMAT_GROUP)
return deliver_sample_group(evlist, tool, event, sample,
machine, read_format);
else
return deliver_sample_value(evlist, tool, event, sample,
&sample->read.one, machine);
}
static int machines__deliver_event(struct machines *machines,
struct evlist *evlist,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool, u64 file_offset,
const char *file_path)
{
struct evsel *evsel;
struct machine *machine;
dump_event(evlist, event, file_offset, sample, file_path);
evsel = evlist__id2evsel(evlist, sample->id);
machine = machines__find_for_cpumode(machines, event, sample);
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
if (evsel == NULL) {
++evlist->stats.nr_unknown_id;
return 0;
}
if (machine == NULL) {
++evlist->stats.nr_unprocessable_samples;
dump_sample(evsel, event, sample, perf_env__arch(NULL));
return 0;
}
dump_sample(evsel, event, sample, perf_env__arch(machine->env));
return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
return tool->mmap(tool, event, sample, machine);
case PERF_RECORD_MMAP2:
if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
++evlist->stats.nr_proc_map_timeout;
return tool->mmap2(tool, event, sample, machine);
case PERF_RECORD_COMM:
return tool->comm(tool, event, sample, machine);
case PERF_RECORD_NAMESPACES:
return tool->namespaces(tool, event, sample, machine);
case PERF_RECORD_CGROUP:
return tool->cgroup(tool, event, sample, machine);
case PERF_RECORD_FORK:
return tool->fork(tool, event, sample, machine);
case PERF_RECORD_EXIT:
return tool->exit(tool, event, sample, machine);
case PERF_RECORD_LOST:
if (tool->lost == perf_event__process_lost)
evlist->stats.total_lost += event->lost.lost;
return tool->lost(tool, event, sample, machine);
case PERF_RECORD_LOST_SAMPLES:
if (tool->lost_samples == perf_event__process_lost_samples &&
!(event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF))
evlist->stats.total_lost_samples += event->lost_samples.lost;
return tool->lost_samples(tool, event, sample, machine);
case PERF_RECORD_READ:
dump_read(evsel, event);
return tool->read(tool, event, sample, evsel, machine);
case PERF_RECORD_THROTTLE:
return tool->throttle(tool, event, sample, machine);
case PERF_RECORD_UNTHROTTLE:
return tool->unthrottle(tool, event, sample, machine);
case PERF_RECORD_AUX:
if (tool->aux == perf_event__process_aux) {
if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
evlist->stats.total_aux_lost += 1;
if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
evlist->stats.total_aux_partial += 1;
if (event->aux.flags & PERF_AUX_FLAG_COLLISION)
evlist->stats.total_aux_collision += 1;
}
return tool->aux(tool, event, sample, machine);
case PERF_RECORD_ITRACE_START:
return tool->itrace_start(tool, event, sample, machine);
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
return tool->context_switch(tool, event, sample, machine);
case PERF_RECORD_KSYMBOL:
return tool->ksymbol(tool, event, sample, machine);
case PERF_RECORD_BPF_EVENT:
return tool->bpf(tool, event, sample, machine);
case PERF_RECORD_TEXT_POKE:
return tool->text_poke(tool, event, sample, machine);
case PERF_RECORD_AUX_OUTPUT_HW_ID:
return tool->aux_output_hw_id(tool, event, sample, machine);
default:
++evlist->stats.nr_unknown_events;
return -1;
}
}
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool,
u64 file_offset,
const char *file_path)
{
struct perf_sample sample;
int ret = evlist__parse_sample(session->evlist, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
return ret;
}
ret = auxtrace__process_event(session, event, &sample, tool);
if (ret < 0)
return ret;
if (ret > 0)
return 0;
ret = machines__deliver_event(&session->machines, session->evlist,
event, &sample, tool, file_offset, file_path);
if (dump_trace && sample.aux_sample.size)
auxtrace__dump_auxtrace_sample(session, &sample);
return ret;
}
static s64 perf_session__process_user_event(struct perf_session *session,
union perf_event *event,
u64 file_offset,
const char *file_path)
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
struct perf_sample sample = { .time = 0, };
int fd = perf_data__fd(session->data);
int err;
if (event->header.type != PERF_RECORD_COMPRESSED ||
tool->compressed == perf_session__process_compressed_event_stub)
dump_event(session->evlist, event, file_offset, &sample, file_path);
/* These events are processed right away */
switch (event->header.type) {
case PERF_RECORD_HEADER_ATTR:
err = tool->attr(tool, event, &session->evlist);
if (err == 0) {
perf_session__set_id_hdr_size(session);
perf_session__set_comm_exec(session);
}
return err;
case PERF_RECORD_EVENT_UPDATE:
return tool->event_update(tool, event, &session->evlist);
case PERF_RECORD_HEADER_EVENT_TYPE:
/*
* Deprecated, but we need to handle it for the sake
* of old data files created in pipe mode.
*/
return 0;
case PERF_RECORD_HEADER_TRACING_DATA:
/*
* Set up the fd for reading while the file is mmaped,
* but only in 'file' mode; in 'pipe' mode the fd is
* already in the proper place.
*/
if (!perf_data__is_pipe(session->data))
lseek(fd, file_offset, SEEK_SET);
return tool->tracing_data(session, event);
case PERF_RECORD_HEADER_BUILD_ID:
return tool->build_id(session, event);
case PERF_RECORD_FINISHED_ROUND:
return tool->finished_round(tool, event, oe);
case PERF_RECORD_ID_INDEX:
return tool->id_index(session, event);
case PERF_RECORD_AUXTRACE_INFO:
return tool->auxtrace_info(session, event);
case PERF_RECORD_AUXTRACE:
/*
* Set up the fd for reading while the file is mmaped,
* but only in 'file' mode; in 'pipe' mode the fd is
* already in the proper place.
*/
if (!perf_data__is_pipe(session->data))
lseek(fd, file_offset + event->header.size, SEEK_SET);
return tool->auxtrace(session, event);
case PERF_RECORD_AUXTRACE_ERROR:
perf_session__auxtrace_error_inc(session, event);
return tool->auxtrace_error(session, event);
case PERF_RECORD_THREAD_MAP:
return tool->thread_map(session, event);
case PERF_RECORD_CPU_MAP:
return tool->cpu_map(session, event);
case PERF_RECORD_STAT_CONFIG:
return tool->stat_config(session, event);
case PERF_RECORD_STAT:
return tool->stat(session, event);
case PERF_RECORD_STAT_ROUND:
return tool->stat_round(session, event);
case PERF_RECORD_TIME_CONV:
session->time_conv = event->time_conv;
return tool->time_conv(session, event);
case PERF_RECORD_HEADER_FEATURE:
return tool->feature(session, event);
case PERF_RECORD_COMPRESSED:
err = tool->compressed(session, event, file_offset, file_path);
if (err)
dump_event(session->evlist, event, file_offset, &sample, file_path);
return err;
case PERF_RECORD_FINISHED_INIT:
return tool->finished_init(session, event);
default:
return -EINVAL;
}
}
int perf_session__deliver_synth_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample)
{
struct evlist *evlist = session->evlist;
struct perf_tool *tool = session->tool;
events_stats__inc(&evlist->stats, event->header.type);
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
return perf_session__process_user_event(session, event, 0, NULL);
return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL);
}
static void event_swap(union perf_event *event, bool sample_id_all)
{
perf_event__swap_op swap;
swap = perf_event__swap_ops[event->header.type];
if (swap)
swap(event, sample_id_all);
}
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
void *buf, size_t buf_sz,
union perf_event **event_ptr,
struct perf_sample *sample)
{
union perf_event *event;
size_t hdr_sz, rest;
int fd;
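/*
 * Fast path: with a single mmap of the whole file and no byte
 * swapping needed, point directly into the mapping instead of
 * reading through the fd.
 */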
if (session->one_mmap && !session->header.needs_swap) {
event = file_offset - session->one_mmap_offset +
session->one_mmap_addr;
goto out_parse_sample;
}
if (perf_data__is_pipe(session->data))
return -1;
fd = perf_data__fd(session->data);
hdr_sz = sizeof(struct perf_event_header);
if (buf_sz < hdr_sz)
return -1;
if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
return -1;
event = (union perf_event *)buf;
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
if (event->header.size < hdr_sz || event->header.size > buf_sz)
return -1;
buf += hdr_sz;
rest = event->header.size - hdr_sz;
if (readn(fd, buf, rest) != (ssize_t)rest)
return -1;
if (session->header.needs_swap)
event_swap(event, evlist__sample_id_all(session->evlist));
out_parse_sample:
if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
evlist__parse_sample(session->evlist, event, sample))
return -1;
*event_ptr = event;
return 0;
}
int perf_session__peek_events(struct perf_session *session, u64 offset,
u64 size, peek_events_cb_t cb, void *data)
{
u64 max_offset = offset + size;
char buf[PERF_SAMPLE_MAX_SIZE];
union perf_event *event;
int err;
do {
err = perf_session__peek_event(session, offset, buf,
PERF_SAMPLE_MAX_SIZE, &event,
NULL);
if (err)
return err;
err = cb(session, event, offset, data);
if (err)
return err;
offset += event->header.size;
if (event->header.type == PERF_RECORD_AUXTRACE)
offset += event->auxtrace.size;
} while (offset < max_offset);
return err;
}
static s64 perf_session__process_event(struct perf_session *session,
union perf_event *event, u64 file_offset,
const char *file_path)
{
struct evlist *evlist = session->evlist;
struct perf_tool *tool = session->tool;
int ret;
if (session->header.needs_swap)
event_swap(event, evlist__sample_id_all(evlist));
if (event->header.type >= PERF_RECORD_HEADER_MAX)
return -EINVAL;
events_stats__inc(&evlist->stats, event->header.type);
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
return perf_session__process_user_event(session, event, file_offset, file_path);
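/*
 * Queue timestamped events for ordered delivery; -ETIME means the
 * timestamp is too old to queue, so fall through and deliver the
 * event directly.
 */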
if (tool->ordered_events) {
u64 timestamp = -1ULL;
ret = evlist__parse_sample_timestamp(evlist, event, ×tamp);
if (ret && ret != -1)
return ret;
ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path);
if (ret != -ETIME)
return ret;
}
return perf_session__deliver_event(session, event, tool, file_offset, file_path);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
{
hdr->type = bswap_32(hdr->type);
hdr->misc = bswap_16(hdr->misc);
hdr->size = bswap_16(hdr->size);
}
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
return machine__findnew_thread(&session->machines.host, -1, pid);
}
int perf_session__register_idle_thread(struct perf_session *session)
{
struct thread *thread = machine__idle_thread(&session->machines.host);
/* machine__idle_thread() got the thread, so put it */
thread__put(thread);
return thread ? 0 : -1;
}
static void
perf_session__warn_order(const struct perf_session *session)
{
const struct ordered_events *oe = &session->ordered_events;
struct evsel *evsel;
bool should_warn = true;
evlist__for_each_entry(session->evlist, evsel) {
if (evsel->core.attr.write_backward)
should_warn = false;
}
if (!should_warn)
return;
if (oe->nr_unordered_events != 0)
ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}
static void perf_session__warn_about_errors(const struct perf_session *session)
{
const struct events_stats *stats = &session->evlist->stats;
if (session->tool->lost == perf_event__process_lost &&
stats->nr_events[PERF_RECORD_LOST] != 0) {
ui__warning("Processed %d events and lost %d chunks!\n\n"
"Check IO/CPU overload!\n\n",
stats->nr_events[0],
stats->nr_events[PERF_RECORD_LOST]);
}
if (session->tool->lost_samples == perf_event__process_lost_samples) {
double drop_rate;
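/* Warn only when more than 5% of the samples were lost. */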
drop_rate = (double)stats->total_lost_samples /
(double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
if (drop_rate > 0.05) {
ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
drop_rate * 100.0);
}
}
if (session->tool->aux == perf_event__process_aux &&
stats->total_aux_lost != 0) {
ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
stats->total_aux_lost,
stats->nr_events[PERF_RECORD_AUX]);
}
if (session->tool->aux == perf_event__process_aux &&
stats->total_aux_partial != 0) {
bool vmm_exclusive = false;
(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
&vmm_exclusive);
ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
"Are you running a KVM guest in the background?%s\n\n",
stats->total_aux_partial,
stats->nr_events[PERF_RECORD_AUX],
vmm_exclusive ?
"\nReloading kvm_intel module with vmm_exclusive=0\n"
"will reduce the gaps to only guest's timeslices." :
"");
}
if (session->tool->aux == perf_event__process_aux &&
stats->total_aux_collision != 0) {
ui__warning("AUX data detected collision %" PRIu64 " times out of %u!\n\n",
stats->total_aux_collision,
stats->nr_events[PERF_RECORD_AUX]);
}
if (stats->nr_unknown_events != 0) {
ui__warning("Found %u unknown events!\n\n"
"Is this an older tool processing a perf.data "
"file generated by a more recent tool?\n\n"
"If that is not the case, consider "
"reporting to [email protected].\n\n",
stats->nr_unknown_events);
}
if (stats->nr_unknown_id != 0) {
ui__warning("%u samples with id not present in the header\n",
stats->nr_unknown_id);
}
if (stats->nr_invalid_chains != 0) {
ui__warning("Found invalid callchains!\n\n"
"%u out of %u events were discarded for this reason.\n\n"
"Consider reporting to [email protected].\n\n",
stats->nr_invalid_chains,
stats->nr_events[PERF_RECORD_SAMPLE]);
}
if (stats->nr_unprocessable_samples != 0) {
ui__warning("%u unprocessable samples recorded.\n"
"Do you have a KVM guest running and not using 'perf kvm'?\n",
stats->nr_unprocessable_samples);
}
perf_session__warn_order(session);
events_stats__auxtrace_error_warn(stats);
if (stats->nr_proc_map_timeout != 0) {
ui__warning("%d map information files for pre-existing threads were\n"
"not processed, if there are samples for addresses they\n"
"will not be resolved, you may find out which are these\n"
"threads by running with -v and redirecting the output\n"
"to a file.\n"
"The time limit to process proc map is too short?\n"
"Increase it by --proc-map-timeout\n",
stats->nr_proc_map_timeout);
}
}
static int perf_session__flush_thread_stack(struct thread *thread,
void *p __maybe_unused)
{
return thread_stack__flush(thread);
}
static int perf_session__flush_thread_stacks(struct perf_session *session)
{
return machines__for_each_thread(&session->machines,
perf_session__flush_thread_stack,
NULL);
}
volatile sig_atomic_t session_done;
static int __perf_session__process_decomp_events(struct perf_session *session);
static int __perf_session__process_pipe_events(struct perf_session *session)
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
union perf_event *event;
uint32_t size, cur_size = 0;
void *buf = NULL;
s64 skip = 0;
u64 head;
ssize_t err;
void *p;
perf_tool__fill_defaults(tool);
head = 0;
cur_size = sizeof(union perf_event);
buf = malloc(cur_size);
if (!buf)
return -errno;
ordered_events__set_copy_on_queue(oe, true);
more:
event = buf;
err = perf_data__read(session->data, event,
sizeof(struct perf_event_header));
if (err <= 0) {
if (err == 0)
goto done;
pr_err("failed to read event header\n");
goto out_err;
}
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
size = event->header.size;
if (size < sizeof(struct perf_event_header)) {
pr_err("bad event header size\n");
goto out_err;
}
if (size > cur_size) {
void *new = realloc(buf, size);
if (!new) {
pr_err("failed to allocate memory to read event\n");
goto out_err;
}
buf = new;
cur_size = size;
event = buf;
}
p = event;
p += sizeof(struct perf_event_header);
if (size - sizeof(struct perf_event_header)) {
err = perf_data__read(session->data, p,
size - sizeof(struct perf_event_header));
if (err <= 0) {
if (err == 0) {
pr_err("unexpected end of event stream\n");
goto done;
}
pr_err("failed to read event data\n");
goto out_err;
}
}
if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
head, event->header.size, event->header.type);
err = -EINVAL;
goto out_err;
}
head += size;
if (skip > 0)
head += skip;
err = __perf_session__process_decomp_events(session);
if (err)
goto out_err;
if (!session_done())
goto more;
done:
/* do the final flush for ordered samples */
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
goto out_err;
err = auxtrace__flush_events(session, tool);
if (err)
goto out_err;
err = perf_session__flush_thread_stacks(session);
out_err:
free(buf);
if (!tool->no_warn)
perf_session__warn_about_errors(session);
ordered_events__free(&session->ordered_events);
auxtrace__free_events(session);
return err;
}
static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
bool needs_swap, union perf_event *error)
{
union perf_event *event;
u16 event_size;
/*
* Ensure we have enough space remaining to read
* the size of the event in the headers.
*/
if (head + sizeof(event->header) > mmap_size)
return NULL;
event = (union perf_event *)(buf + head);
if (needs_swap)
perf_event_header__bswap(&event->header);
event_size = event->header.size;
if (head + event_size <= mmap_size)
return event;
/* We're not fetching the event so swap back again */
if (needs_swap)
perf_event_header__bswap(&event->header);
/* Check if the event fits into the next mmapped buf. */
if (event_size <= mmap_size - head % page_size) {
/* Remap buf and fetch again. */
return NULL;
}
/* Invalid input. Event size should never exceed mmap_size. */
pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
" fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
return error;
}
static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}
static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}
static int __perf_session__process_decomp_events(struct perf_session *session)
{
s64 skip;
u64 size;
struct decomp *decomp = session->active_decomp->decomp_last;
if (!decomp)
return 0;
while (decomp->head < decomp->size && !session_done()) {
union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
session->header.needs_swap);
if (!event)
break;
size = event->header.size;
if (size < sizeof(struct perf_event_header) ||
(skip = perf_session__process_event(session, event, decomp->file_pos,
decomp->file_path)) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
decomp->file_pos + decomp->head, event->header.size, event->header.type);
return -EINVAL;
}
if (skip)
size += skip;
decomp->head += size;
}
return 0;
}
/*
* On 64bit we can mmap the data file in one go. No need for tiny mmap
* slices. On 32bit we use 32MB.
*/
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
struct reader;
typedef s64 (*reader_cb_t)(struct perf_session *session,
union perf_event *event,
u64 file_offset,
const char *file_path);
struct reader {
int fd;
const char *path;
u64 data_size;
u64 data_offset;
reader_cb_t process;
bool in_place_update;
char *mmaps[NUM_MMAPS];
size_t mmap_size;
int mmap_idx;
char *mmap_cur;
u64 file_pos;
u64 file_offset;
u64 head;
u64 size;
bool done;
struct zstd_data zstd_data;
struct decomp_data decomp_data;
};
static int
reader__init(struct reader *rd, bool *one_mmap)
{
u64 data_size = rd->data_size;
char **mmaps = rd->mmaps;
rd->head = rd->data_offset;
data_size += rd->data_offset;
rd->mmap_size = MMAP_SIZE;
if (rd->mmap_size > data_size) {
rd->mmap_size = data_size;
if (one_mmap)
*one_mmap = true;
}
memset(mmaps, 0, sizeof(rd->mmaps));
if (zstd_init(&rd->zstd_data, 0))
return -1;
rd->decomp_data.zstd_decomp = &rd->zstd_data;
return 0;
}
static void
reader__release_decomp(struct reader *rd)
{
perf_decomp__release_events(rd->decomp_data.decomp);
zstd_fini(&rd->zstd_data);
}
static int
reader__mmap(struct reader *rd, struct perf_session *session)
{
int mmap_prot, mmap_flags;
char *buf, **mmaps = rd->mmaps;
u64 page_offset;
mmap_prot = PROT_READ;
mmap_flags = MAP_SHARED;
if (rd->in_place_update) {
mmap_prot |= PROT_WRITE;
} else if (session->header.needs_swap) {
mmap_prot |= PROT_WRITE;
mmap_flags = MAP_PRIVATE;
}
if (mmaps[rd->mmap_idx]) {
munmap(mmaps[rd->mmap_idx], rd->mmap_size);
mmaps[rd->mmap_idx] = NULL;
}
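/*
 * mmap offsets must be page aligned: round the current position
 * down to a page boundary and keep the remainder in rd->head.
 */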
page_offset = page_size * (rd->head / page_size);
rd->file_offset += page_offset;
rd->head -= page_offset;
buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd,
rd->file_offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
return -errno;
}
mmaps[rd->mmap_idx] = rd->mmap_cur = buf;
rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1);
rd->file_pos = rd->file_offset + rd->head;
if (session->one_mmap) {
session->one_mmap_addr = buf;
session->one_mmap_offset = rd->file_offset;
}
return 0;
}
enum {
READER_OK,
READER_NODATA,
};
static int
reader__read_event(struct reader *rd, struct perf_session *session,
struct ui_progress *prog)
{
u64 size;
int err = READER_OK;
union perf_event *event;
s64 skip;
event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur,
session->header.needs_swap);
if (IS_ERR(event))
return PTR_ERR(event);
if (!event)
return READER_NODATA;
size = event->header.size;
skip = -EINVAL;
if (size < sizeof(struct perf_event_header) ||
(skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
rd->file_offset + rd->head, event->header.size,
event->header.type, strerror(-skip));
err = skip;
goto out;
}
if (skip)
size += skip;
rd->size += size;
rd->head += size;
rd->file_pos += size;
err = __perf_session__process_decomp_events(session);
if (err)
goto out;
ui_progress__update(prog, size);
out:
return err;
}
static inline bool
reader__eof(struct reader *rd)
{
return (rd->file_pos >= rd->data_size + rd->data_offset);
}
static int
reader__process_events(struct reader *rd, struct perf_session *session,
struct ui_progress *prog)
{
int err;
err = reader__init(rd, &session->one_mmap);
if (err)
goto out;
session->active_decomp = &rd->decomp_data;
remap:
err = reader__mmap(rd, session);
if (err)
goto out;
more:
err = reader__read_event(rd, session, prog);
if (err < 0)
goto out;
else if (err == READER_NODATA)
goto remap;
if (session_done())
goto out;
if (!reader__eof(rd))
goto more;
out:
session->active_decomp = &session->decomp_data;
return err;
}
static s64 process_simple(struct perf_session *session,
union perf_event *event,
u64 file_offset,
const char *file_path)
{
return perf_session__process_event(session, event, file_offset, file_path);
}
static int __perf_session__process_events(struct perf_session *session)
{
struct reader rd = {
.fd = perf_data__fd(session->data),
.path = session->data->file.path,
.data_size = session->header.data_size,
.data_offset = session->header.data_offset,
.process = process_simple,
.in_place_update = session->data->in_place_update,
};
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
struct ui_progress prog;
int err;
perf_tool__fill_defaults(tool);
if (rd.data_size == 0)
return -1;
ui_progress__init_size(&prog, rd.data_size, "Processing events...");
err = reader__process_events(&rd, session, &prog);
if (err)
goto out_err;
/* do the final flush for ordered samples */
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
goto out_err;
err = auxtrace__flush_events(session, tool);
if (err)
goto out_err;
err = perf_session__flush_thread_stacks(session);
out_err:
ui_progress__finish();
if (!tool->no_warn)
perf_session__warn_about_errors(session);
/*
* We may be switching perf.data output; make ordered_events
* reusable.
*/
ordered_events__reinit(&session->ordered_events);
auxtrace__free_events(session);
reader__release_decomp(&rd);
session->one_mmap = false;
return err;
}
/*
* Processing 2 MB of data from each reader in sequence,
* because that's the way the ordered events sorting works
* most efficiently.
*/
#define READER_MAX_SIZE (2 * 1024 * 1024)
/*
* This function reads, merges and processes directory data.
* It assumes version 1 of the directory data, where each
* data file holds per-cpu data, already sorted by the kernel.
*/
static int __perf_session__process_dir_events(struct perf_session *session)
{
struct perf_data *data = session->data;
struct perf_tool *tool = session->tool;
int i, ret, readers, nr_readers;
struct ui_progress prog;
u64 total_size = perf_data__size(session->data);
struct reader *rd;
perf_tool__fill_defaults(tool);
ui_progress__init_size(&prog, total_size, "Sorting events...");
nr_readers = 1;
for (i = 0; i < data->dir.nr; i++) {
if (data->dir.files[i].size)
nr_readers++;
}
rd = zalloc(nr_readers * sizeof(struct reader));
if (!rd)
return -ENOMEM;
rd[0] = (struct reader) {
.fd = perf_data__fd(session->data),
.path = session->data->file.path,
.data_size = session->header.data_size,
.data_offset = session->header.data_offset,
.process = process_simple,
.in_place_update = session->data->in_place_update,
};
ret = reader__init(&rd[0], NULL);
if (ret)
goto out_err;
ret = reader__mmap(&rd[0], session);
if (ret)
goto out_err;
readers = 1;
for (i = 0; i < data->dir.nr; i++) {
if (!data->dir.files[i].size)
continue;
rd[readers] = (struct reader) {
.fd = data->dir.files[i].fd,
.path = data->dir.files[i].path,
.data_size = data->dir.files[i].size,
.data_offset = 0,
.process = process_simple,
.in_place_update = session->data->in_place_update,
};
ret = reader__init(&rd[readers], NULL);
if (ret)
goto out_err;
ret = reader__mmap(&rd[readers], session);
if (ret)
goto out_err;
readers++;
}
i = 0;
while (readers) {
if (session_done())
break;
if (rd[i].done) {
i = (i + 1) % nr_readers;
continue;
}
if (reader__eof(&rd[i])) {
rd[i].done = true;
readers--;
continue;
}
session->active_decomp = &rd[i].decomp_data;
ret = reader__read_event(&rd[i], session, &prog);
if (ret < 0) {
goto out_err;
} else if (ret == READER_NODATA) {
ret = reader__mmap(&rd[i], session);
if (ret)
goto out_err;
}
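/*
 * After READER_MAX_SIZE bytes from this reader, move on to the
 * next one so the ordered events code sees interleaved input.
 */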
if (rd[i].size >= READER_MAX_SIZE) {
rd[i].size = 0;
i = (i + 1) % nr_readers;
}
}
ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL);
if (ret)
goto out_err;
ret = perf_session__flush_thread_stacks(session);
out_err:
ui_progress__finish();
if (!tool->no_warn)
perf_session__warn_about_errors(session);
/*
* We may be switching perf.data output; make ordered_events
* reusable.
*/
ordered_events__reinit(&session->ordered_events);
session->one_mmap = false;
session->active_decomp = &session->decomp_data;
for (i = 0; i < nr_readers; i++)
reader__release_decomp(&rd[i]);
zfree(&rd);
return ret;
}
int perf_session__process_events(struct perf_session *session)
{
if (perf_session__register_idle_thread(session) < 0)
return -ENOMEM;
if (perf_data__is_pipe(session->data))
return __perf_session__process_pipe_events(session);
if (perf_data__is_dir(session->data) && session->data->dir.nr)
return __perf_session__process_dir_events(session);
return __perf_session__process_events(session);
}
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
struct evsel *evsel;
evlist__for_each_entry(session->evlist, evsel) {
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
return true;
}
pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
return false;
}
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
char *bracket;
struct ref_reloc_sym *ref;
struct kmap *kmap;
ref = zalloc(sizeof(struct ref_reloc_sym));
if (ref == NULL)
return -ENOMEM;
ref->name = strdup(symbol_name);
if (ref->name == NULL) {
free(ref);
return -ENOMEM;
}
bracket = strchr(ref->name, ']');
if (bracket)
*bracket = '\0';
ref->addr = addr;
kmap = map__kmap(map);
if (kmap)
kmap->ref_reloc_sym = ref;
return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
return machines__fprintf_dsos(&session->machines, fp);
}
size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm)
{
return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
bool skip_empty)
{
size_t ret;
const char *msg = "";
if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
return ret;
}
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
/*
* FIXME: Here we have to actually print all the machines in this
* session, not just the host...
*/
return machine__fprintf(&session->machines.host, fp);
}
struct evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type)
{
struct evsel *pos;
evlist__for_each_entry(session->evlist, pos) {
if (pos->core.attr.type == type)
return pos;
}
return NULL;
}
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap)
{
int i, err = -1;
struct perf_cpu_map *map;
int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
for (i = 0; i < PERF_TYPE_MAX; ++i) {
struct evsel *evsel;
evsel = perf_session__find_first_evtype(session, i);
if (!evsel)
continue;
if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
pr_err("File does not contain CPU events. "
"Remove -C option to proceed.\n");
return -1;
}
}
map = perf_cpu_map__new(cpu_list);
if (map == NULL) {
pr_err("Invalid cpu_list\n");
return -1;
}
for (i = 0; i < perf_cpu_map__nr(map); i++) {
struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
if (cpu.cpu >= nr_cpus) {
pr_err("Requested CPU %d too large. "
"Consider raising MAX_NR_CPUS\n", cpu.cpu);
goto out_delete_map;
}
__set_bit(cpu.cpu, cpu_bitmap);
}
err = 0;
out_delete_map:
perf_cpu_map__put(map);
return err;
}
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
bool full)
{
if (session == NULL || fp == NULL)
return;
fprintf(fp, "# ========\n");
perf_header__fprintf_info(session, fp, full);
fprintf(fp, "# ========\n#\n");
}
static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid)
{
struct machine *machine = machines__findnew(&session->machines, machine_pid);
struct thread *thread;
if (!machine)
return -ENOMEM;
machine->single_address_space = session->machines.host.single_address_space;
thread = machine__idle_thread(machine);
if (!thread)
return -ENOMEM;
thread__put(thread);
machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid);
return 0;
}
static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
pid_t tid, int guest_cpu)
{
struct machine *machine = &session->machines.host;
struct thread *thread = machine__findnew_thread(machine, pid, tid);
if (!thread)
return -ENOMEM;
thread__set_guest_cpu(thread, guest_cpu);
thread__put(thread);
return 0;
}
int perf_event__process_id_index(struct perf_session *session,
union perf_event *event)
{
struct evlist *evlist = session->evlist;
struct perf_record_id_index *ie = &event->id_index;
size_t sz = ie->header.size - sizeof(*ie);
size_t i, nr, max_nr;
size_t e1_sz = sizeof(struct id_index_entry);
size_t e2_sz = sizeof(struct id_index_entry_2);
size_t etot_sz = e1_sz + e2_sz;
struct id_index_entry_2 *e2;
pid_t last_pid = 0;
max_nr = sz / e1_sz;
nr = ie->nr;
if (nr > max_nr) {
printf("Too big: nr %zu max_nr %zu\n", nr, max_nr);
return -EINVAL;
}
if (sz >= nr * etot_sz) {
max_nr = sz / etot_sz;
if (nr > max_nr) {
printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr);
return -EINVAL;
}
e2 = (void *)ie + sizeof(*ie) + nr * e1_sz;
} else {
e2 = NULL;
}
if (dump_trace)
fprintf(stdout, " nr: %zu\n", nr);
for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) {
struct id_index_entry *e = &ie->entries[i];
struct perf_sample_id *sid;
int ret;
if (dump_trace) {
fprintf(stdout, " ... id: %"PRI_lu64, e->id);
fprintf(stdout, " idx: %"PRI_lu64, e->idx);
fprintf(stdout, " cpu: %"PRI_ld64, e->cpu);
fprintf(stdout, " tid: %"PRI_ld64, e->tid);
if (e2) {
fprintf(stdout, " machine_pid: %"PRI_ld64, e2->machine_pid);
fprintf(stdout, " vcpu: %"PRI_lu64"\n", e2->vcpu);
} else {
fprintf(stdout, "\n");
}
}
sid = evlist__id2sid(evlist, e->id);
if (!sid)
return -ENOENT;
sid->idx = e->idx;
sid->cpu.cpu = e->cpu;
sid->tid = e->tid;
if (!e2)
continue;
sid->machine_pid = e2->machine_pid;
sid->vcpu.cpu = e2->vcpu;
if (!sid->machine_pid)
continue;
if (sid->machine_pid != last_pid) {
ret = perf_session__register_guest(session, sid->machine_pid);
if (ret)
return ret;
last_pid = sid->machine_pid;
perf_guest = true;
}
ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu);
if (ret)
return ret;
}
return 0;
}
| linux-master | tools/perf/util/session.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*
* Refactored from builtin-top.c, see that file for further copyright notes.
*/
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include "symbol.h"
#include "top.h"
#include "util.h"
#include <inttypes.h>
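/*
 * snprintf() returns the length that would have been written; clamp
 * it to the buffer size so callers can safely accumulate offsets.
 */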
#define SNPRINTF(buf, size, fmt, args...) \
({ \
size_t r = snprintf(buf, size, fmt, ## args); \
r > size ? size : r; \
})
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
{
float samples_per_sec;
float ksamples_per_sec;
float esamples_percent;
struct record_opts *opts = &top->record_opts;
struct target *target = &opts->target;
size_t ret = 0;
if (top->samples) {
samples_per_sec = top->samples / top->delay_secs;
ksamples_per_sec = top->kernel_samples / top->delay_secs;
esamples_percent = (100.0 * top->exact_samples) / top->samples;
} else {
samples_per_sec = ksamples_per_sec = esamples_percent = 0.0;
}
if (!perf_guest) {
float ksamples_percent = 0.0;
if (samples_per_sec)
ksamples_percent = (100.0 * ksamples_per_sec) /
samples_per_sec;
ret = SNPRINTF(bf, size,
" PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
" exact: %4.1f%% lost: %" PRIu64 "/%" PRIu64 " drop: %" PRIu64 "/%" PRIu64 " [",
samples_per_sec, ksamples_percent, esamples_percent,
top->lost, top->lost_total, top->drop, top->drop_total);
} else {
float us_samples_per_sec = top->us_samples / top->delay_secs;
float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;
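/*
 * Each 100.0 - 100.0 * (total - x) / total term below is just the
 * percentage of samples in that class, i.e. 100.0 * x / total.
 */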
ret = SNPRINTF(bf, size,
" PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
" guest kernel:%4.1f%% guest us:%4.1f%%"
" exact: %4.1f%% [", samples_per_sec,
100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
samples_per_sec)),
100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
samples_per_sec)),
100.0 - (100.0 * ((samples_per_sec -
guest_kernel_samples_per_sec) /
samples_per_sec)),
100.0 - (100.0 * ((samples_per_sec -
guest_us_samples_per_sec) /
samples_per_sec)),
esamples_percent);
}
if (top->evlist->core.nr_entries == 1) {
struct evsel *first = evlist__first(top->evlist);
ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
(uint64_t)first->core.attr.sample_period,
opts->freq ? "Hz" : "");
}
ret += SNPRINTF(bf + ret, size - ret, "%s", evsel__name(top->sym_evsel));
ret += SNPRINTF(bf + ret, size - ret, "], ");
if (target->pid)
ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s",
target->pid);
else if (target->tid)
ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
target->tid);
else if (target->uid_str != NULL)
ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
target->uid_str);
else
ret += SNPRINTF(bf + ret, size - ret, " (all");
if (target->cpu_list)
ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
? "s" : "",
target->cpu_list);
else {
if (target->tid)
ret += SNPRINTF(bf + ret, size - ret, ")");
else
ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
perf_cpu_map__nr(top->evlist->core.user_requested_cpus),
perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
? "s" : "");
}
perf_top__reset_sample_counters(top);
return ret;
}
void perf_top__reset_sample_counters(struct perf_top *top)
{
top->samples = top->us_samples = top->kernel_samples =
top->exact_samples = top->guest_kernel_samples =
top->guest_us_samples = top->lost = top->drop = 0;
}
| linux-master | tools/perf/util/top.c |
// SPDX-License-Identifier: GPL-2.0
/*
* bpf_kwork.c
*
* Copyright (c) 2022 Huawei Inc, Yang Jihong <[email protected]>
*/
#include <time.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/time64.h>
#include "util/debug.h"
#include "util/evsel.h"
#include "util/kwork.h"
#include <bpf/bpf.h>
#include <perf/cpumap.h>
#include "util/bpf_skel/kwork_trace.skel.h"
/*
* This should be in sync with "util/bpf_skel/kwork_trace.bpf.c"
*/
#define MAX_KWORKNAME 128
struct work_key {
u32 type;
u32 cpu;
u64 id;
};
struct report_data {
u64 nr;
u64 total_time;
u64 max_time;
u64 max_time_start;
u64 max_time_end;
};
struct kwork_class_bpf {
struct kwork_class *class;
void (*load_prepare)(struct perf_kwork *kwork);
int (*get_work_name)(struct work_key *key, char **ret_name);
};
static struct kwork_trace_bpf *skel;
static struct timespec ts_start;
static struct timespec ts_end;
void perf_kwork__trace_start(void)
{
clock_gettime(CLOCK_MONOTONIC, &ts_start);
skel->bss->enabled = 1;
}
void perf_kwork__trace_finish(void)
{
clock_gettime(CLOCK_MONOTONIC, &ts_end);
skel->bss->enabled = 0;
}
static int get_work_name_from_map(struct work_key *key, char **ret_name)
{
char name[MAX_KWORKNAME] = { 0 };
int fd = bpf_map__fd(skel->maps.perf_kwork_names);
*ret_name = NULL;
if (fd < 0) {
pr_debug("Invalid names map fd\n");
return 0;
}
if ((bpf_map_lookup_elem(fd, key, name) == 0) && (strlen(name) != 0)) {
*ret_name = strdup(name);
if (*ret_name == NULL) {
pr_err("Failed to copy work name\n");
return -1;
}
}
return 0;
}
static void irq_load_prepare(struct perf_kwork *kwork)
{
if (kwork->report == KWORK_REPORT_RUNTIME) {
bpf_program__set_autoload(skel->progs.report_irq_handler_entry, true);
bpf_program__set_autoload(skel->progs.report_irq_handler_exit, true);
}
}
static struct kwork_class_bpf kwork_irq_bpf = {
.load_prepare = irq_load_prepare,
.get_work_name = get_work_name_from_map,
};
static void softirq_load_prepare(struct perf_kwork *kwork)
{
if (kwork->report == KWORK_REPORT_RUNTIME) {
bpf_program__set_autoload(skel->progs.report_softirq_entry, true);
bpf_program__set_autoload(skel->progs.report_softirq_exit, true);
} else if (kwork->report == KWORK_REPORT_LATENCY) {
bpf_program__set_autoload(skel->progs.latency_softirq_raise, true);
bpf_program__set_autoload(skel->progs.latency_softirq_entry, true);
}
}
static struct kwork_class_bpf kwork_softirq_bpf = {
.load_prepare = softirq_load_prepare,
.get_work_name = get_work_name_from_map,
};
static void workqueue_load_prepare(struct perf_kwork *kwork)
{
if (kwork->report == KWORK_REPORT_RUNTIME) {
bpf_program__set_autoload(skel->progs.report_workqueue_execute_start, true);
bpf_program__set_autoload(skel->progs.report_workqueue_execute_end, true);
} else if (kwork->report == KWORK_REPORT_LATENCY) {
bpf_program__set_autoload(skel->progs.latency_workqueue_activate_work, true);
bpf_program__set_autoload(skel->progs.latency_workqueue_execute_start, true);
}
}
static struct kwork_class_bpf kwork_workqueue_bpf = {
.load_prepare = workqueue_load_prepare,
.get_work_name = get_work_name_from_map,
};
static struct kwork_class_bpf *
kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
[KWORK_CLASS_IRQ] = &kwork_irq_bpf,
[KWORK_CLASS_SOFTIRQ] = &kwork_softirq_bpf,
[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue_bpf,
};
static bool valid_kwork_class_type(enum kwork_class_type type)
{
return type >= 0 && type < KWORK_CLASS_MAX ? true : false;
}
static int setup_filters(struct perf_kwork *kwork)
{
u8 val = 1;
int i, nr_cpus, key, fd;
struct perf_cpu_map *map;
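/*
 * Mark each requested CPU in the BPF cpu filter map and flag
 * has_cpu_filter so the BPF programs know to consult it.
 */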
if (kwork->cpu_list != NULL) {
fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
if (fd < 0) {
pr_debug("Invalid cpu filter fd\n");
return -1;
}
map = perf_cpu_map__new(kwork->cpu_list);
if (map == NULL) {
pr_debug("Invalid cpu_list\n");
return -1;
}
nr_cpus = libbpf_num_possible_cpus();
for (i = 0; i < perf_cpu_map__nr(map); i++) {
struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
if (cpu.cpu >= nr_cpus) {
perf_cpu_map__put(map);
pr_err("Requested cpu %d too large\n", cpu.cpu);
return -1;
}
bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
}
perf_cpu_map__put(map);
skel->bss->has_cpu_filter = 1;
}
if (kwork->profile_name != NULL) {
if (strlen(kwork->profile_name) >= MAX_KWORKNAME) {
pr_err("Requested name filter %s too large, limit to %d\n",
kwork->profile_name, MAX_KWORKNAME - 1);
return -1;
}
fd = bpf_map__fd(skel->maps.perf_kwork_name_filter);
if (fd < 0) {
pr_debug("Invalid name filter fd\n");
return -1;
}
key = 0;
bpf_map_update_elem(fd, &key, kwork->profile_name, BPF_ANY);
skel->bss->has_name_filter = 1;
}
return 0;
}
int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork)
{
struct bpf_program *prog;
struct kwork_class *class;
struct kwork_class_bpf *class_bpf;
enum kwork_class_type type;
skel = kwork_trace_bpf__open();
if (!skel) {
pr_debug("Failed to open kwork trace skeleton\n");
return -1;
}
/*
* Set all progs to non-autoload, then enable only the
* relevant ones according to the configuration.
*/
bpf_object__for_each_program(prog, skel->obj)
bpf_program__set_autoload(prog, false);
list_for_each_entry(class, &kwork->class_list, list) {
type = class->type;
if (!valid_kwork_class_type(type) ||
(kwork_class_bpf_supported_list[type] == NULL)) {
pr_err("Unsupported bpf trace class %s\n", class->name);
goto out;
}
class_bpf = kwork_class_bpf_supported_list[type];
class_bpf->class = class;
if (class_bpf->load_prepare != NULL)
class_bpf->load_prepare(kwork);
}
if (kwork_trace_bpf__load(skel)) {
pr_debug("Failed to load kwork trace skeleton\n");
goto out;
}
if (setup_filters(kwork))
goto out;
if (kwork_trace_bpf__attach(skel)) {
pr_debug("Failed to attach kwork trace skeleton\n");
goto out;
}
return 0;
out:
kwork_trace_bpf__destroy(skel);
return -1;
}
static int add_work(struct perf_kwork *kwork,
struct work_key *key,
struct report_data *data)
{
struct kwork_work *work;
struct kwork_class_bpf *bpf_trace;
struct kwork_work tmp = {
.id = key->id,
.name = NULL,
.cpu = key->cpu,
};
enum kwork_class_type type = key->type;
if (!valid_kwork_class_type(type)) {
pr_debug("Invalid class type %d to add work\n", type);
return -1;
}
bpf_trace = kwork_class_bpf_supported_list[type];
tmp.class = bpf_trace->class;
if ((bpf_trace->get_work_name != NULL) &&
(bpf_trace->get_work_name(key, &tmp.name)))
return -1;
work = perf_kwork_add_work(kwork, tmp.class, &tmp);
if (work == NULL)
return -1;
if (kwork->report == KWORK_REPORT_RUNTIME) {
work->nr_atoms = data->nr;
work->total_runtime = data->total_time;
work->max_runtime = data->max_time;
work->max_runtime_start = data->max_time_start;
work->max_runtime_end = data->max_time_end;
} else if (kwork->report == KWORK_REPORT_LATENCY) {
work->nr_atoms = data->nr;
work->total_latency = data->total_time;
work->max_latency = data->max_time;
work->max_latency_start = data->max_time_start;
work->max_latency_end = data->max_time_end;
} else {
pr_debug("Invalid bpf report type %d\n", kwork->report);
return -1;
}
kwork->timestart = (u64)ts_start.tv_sec * NSEC_PER_SEC + ts_start.tv_nsec;
kwork->timeend = (u64)ts_end.tv_sec * NSEC_PER_SEC + ts_end.tv_nsec;
return 0;
}
int perf_kwork__report_read_bpf(struct perf_kwork *kwork)
{
struct report_data data;
struct work_key key = {
.type = 0,
.cpu = 0,
.id = 0,
};
struct work_key prev = {
.type = 0,
.cpu = 0,
.id = 0,
};
int fd = bpf_map__fd(skel->maps.perf_kwork_report);
if (fd < 0) {
pr_debug("Invalid report fd\n");
return -1;
}
while (!bpf_map_get_next_key(fd, &prev, &key)) {
if ((bpf_map_lookup_elem(fd, &key, &data)) != 0) {
pr_debug("Failed to lookup report elem\n");
return -1;
}
if ((data.nr != 0) && (add_work(kwork, &key, &data) != 0))
return -1;
prev = key;
}
return 0;
}
void perf_kwork__report_cleanup_bpf(void)
{
kwork_trace_bpf__destroy(skel);
}
| linux-master | tools/perf/util/bpf_kwork.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dlfilter.c: Interface to perf script --dlfilter shared object
* Copyright (c) 2021, Intel Corporation.
*/
#include <dlfcn.h>
#include <stdlib.h>
#include <string.h>
#include <dirent.h>
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "trace-event.h"
#include "symbol.h"
#include "srcline.h"
#include "dlfilter.h"
#include "../include/perf/perf_dlfilter.h"
static void al_to_d_al(struct addr_location *al, struct perf_dlfilter_al *d_al)
{
struct symbol *sym = al->sym;
d_al->size = sizeof(*d_al);
if (al->map) {
struct dso *dso = map__dso(al->map);
if (symbol_conf.show_kernel_path && dso->long_name)
d_al->dso = dso->long_name;
else
d_al->dso = dso->name;
d_al->is_64_bit = dso->is_64_bit;
d_al->buildid_size = dso->bid.size;
d_al->buildid = dso->bid.data;
} else {
d_al->dso = NULL;
d_al->is_64_bit = 0;
d_al->buildid_size = 0;
d_al->buildid = NULL;
}
if (sym) {
d_al->sym = sym->name;
d_al->sym_start = sym->start;
d_al->sym_end = sym->end;
if (al->addr < sym->end)
d_al->symoff = al->addr - sym->start;
else
d_al->symoff = al->addr - map__start(al->map) - sym->start;
d_al->sym_binding = sym->binding;
} else {
d_al->sym = NULL;
d_al->sym_start = 0;
d_al->sym_end = 0;
d_al->symoff = 0;
d_al->sym_binding = 0;
}
d_al->addr = al->addr;
d_al->comm = NULL;
d_al->filtered = 0;
d_al->priv = NULL;
}
static struct addr_location *get_al(struct dlfilter *d)
{
struct addr_location *al = d->al;
if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
return NULL;
return al;
}
static struct thread *get_thread(struct dlfilter *d)
{
struct addr_location *al = get_al(d);
return al ? al->thread : NULL;
}
static const struct perf_dlfilter_al *dlfilter__resolve_ip(void *ctx)
{
struct dlfilter *d = (struct dlfilter *)ctx;
struct perf_dlfilter_al *d_al = d->d_ip_al;
struct addr_location *al;
if (!d->ctx_valid)
return NULL;
/* 'size' is also used to indicate already initialized */
if (d_al->size)
return d_al;
al = get_al(d);
if (!al)
return NULL;
al_to_d_al(al, d_al);
d_al->is_kernel_ip = machine__kernel_ip(d->machine, d->sample->ip);
d_al->comm = al->thread ? thread__comm_str(al->thread) : ":-1";
d_al->filtered = al->filtered;
return d_al;
}
static const struct perf_dlfilter_al *dlfilter__resolve_addr(void *ctx)
{
struct dlfilter *d = (struct dlfilter *)ctx;
struct perf_dlfilter_al *d_addr_al = d->d_addr_al;
struct addr_location *addr_al = d->addr_al;
if (!d->ctx_valid || !d->d_sample->addr_correlates_sym)
return NULL;
/* 'size' is also used to indicate already initialized */
if (d_addr_al->size)
return d_addr_al;
if (!addr_al->thread) {
struct thread *thread = get_thread(d);
if (!thread)
return NULL;
thread__resolve(thread, addr_al, d->sample);
}
al_to_d_al(addr_al, d_addr_al);
d_addr_al->is_kernel_ip = machine__kernel_ip(d->machine, d->sample->addr);
return d_addr_al;
}
static char **dlfilter__args(void *ctx, int *dlargc)
{
struct dlfilter *d = (struct dlfilter *)ctx;
if (dlargc)
*dlargc = 0;
else
return NULL;
if (!d->ctx_valid && !d->in_start && !d->in_stop)
return NULL;
*dlargc = d->dlargc;
return d->dlargv;
}
static bool has_priv(struct perf_dlfilter_al *d_al_p)
{
return d_al_p->size >= offsetof(struct perf_dlfilter_al, priv) + sizeof(d_al_p->priv);
}
static __s32 dlfilter__resolve_address(void *ctx, __u64 address, struct perf_dlfilter_al *d_al_p)
{
struct dlfilter *d = (struct dlfilter *)ctx;
struct perf_dlfilter_al d_al;
struct addr_location al;
struct thread *thread;
__u32 sz;
if (!d->ctx_valid || !d_al_p)
return -1;
thread = get_thread(d);
if (!thread)
return -1;
addr_location__init(&al);
thread__find_symbol_fb(thread, d->sample->cpumode, address, &al);
al_to_d_al(&al, &d_al);
d_al.is_kernel_ip = machine__kernel_ip(d->machine, address);
sz = d_al_p->size;
memcpy(d_al_p, &d_al, min((size_t)sz, sizeof(d_al)));
d_al_p->size = sz;
if (has_priv(d_al_p))
d_al_p->priv = memdup(&al, sizeof(al));
else /* Avoid leak for v0 API */
addr_location__exit(&al);
return 0;
}
static void dlfilter__al_cleanup(void *ctx __maybe_unused, struct perf_dlfilter_al *d_al_p)
{
struct addr_location *al;
/* Ensure backward compatibility */
if (!has_priv(d_al_p) || !d_al_p->priv)
return;
al = d_al_p->priv;
d_al_p->priv = NULL;
addr_location__exit(al);
free(al);
}
static const __u8 *dlfilter__insn(void *ctx, __u32 *len)
{
struct dlfilter *d = (struct dlfilter *)ctx;
if (!len)
return NULL;
*len = 0;
if (!d->ctx_valid)
return NULL;
if (d->sample->ip && !d->sample->insn_len) {
struct addr_location *al = d->al;
if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
return NULL;
if (thread__maps(al->thread)) {
struct machine *machine = maps__machine(thread__maps(al->thread));
if (machine)
script_fetch_insn(d->sample, al->thread, machine);
}
}
if (!d->sample->insn_len)
return NULL;
*len = d->sample->insn_len;
return (__u8 *)d->sample->insn;
}
static const char *dlfilter__srcline(void *ctx, __u32 *line_no)
{
struct dlfilter *d = (struct dlfilter *)ctx;
struct addr_location *al;
unsigned int line = 0;
char *srcfile = NULL;
struct map *map;
struct dso *dso;
u64 addr;
if (!d->ctx_valid || !line_no)
return NULL;
al = get_al(d);
if (!al)
return NULL;
map = al->map;
addr = al->addr;
dso = map ? map__dso(map) : NULL;
if (dso)
srcfile = get_srcline_split(dso, map__rip_2objdump(map, addr), &line);
*line_no = line;
return srcfile;
}
static struct perf_event_attr *dlfilter__attr(void *ctx)
{
struct dlfilter *d = (struct dlfilter *)ctx;
if (!d->ctx_valid)
return NULL;
return &d->evsel->core.attr;
}
static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
{
struct dlfilter *d = (struct dlfilter *)ctx;
struct addr_location *al;
struct addr_location a;
struct map *map;
u64 offset;
__s32 ret;
if (!d->ctx_valid)
return -1;
al = get_al(d);
if (!al)
return -1;
map = al->map;
if (map && ip >= map__start(map) && ip < map__end(map) &&
machine__kernel_ip(d->machine, ip) == machine__kernel_ip(d->machine, d->sample->ip))
goto have_map;
addr_location__init(&a);
thread__find_map_fb(al->thread, d->sample->cpumode, ip, &a);
if (!a.map) {
ret = -1;
goto out;
}
map = a.map;
have_map:
offset = map__map_ip(map, ip);
if (ip + len >= map__end(map))
len = map__end(map) - ip;
ret = dso__data_read_offset(map__dso(map), d->machine, offset, buf, len);
out:
addr_location__exit(&a);
return ret;
}
static const struct perf_dlfilter_fns perf_dlfilter_fns = {
.resolve_ip = dlfilter__resolve_ip,
.resolve_addr = dlfilter__resolve_addr,
.args = dlfilter__args,
.resolve_address = dlfilter__resolve_address,
.al_cleanup = dlfilter__al_cleanup,
.insn = dlfilter__insn,
.srcline = dlfilter__srcline,
.attr = dlfilter__attr,
.object_code = dlfilter__object_code,
};
static char *find_dlfilter(const char *file)
{
char path[PATH_MAX];
char *exec_path;
if (strchr(file, '/'))
goto out;
if (!access(file, R_OK)) {
/*
* Prepend "./" so that dlopen will find the file in the
* current directory.
*/
snprintf(path, sizeof(path), "./%s", file);
file = path;
goto out;
}
exec_path = get_argv_exec_path();
if (!exec_path)
goto out;
snprintf(path, sizeof(path), "%s/dlfilters/%s", exec_path, file);
free(exec_path);
if (!access(path, R_OK))
file = path;
out:
return strdup(file);
}
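/*
 * Statically assert that the dlfilter ABI flag values stay in sync
 * with perf's internal PERF_IP_FLAG_* values.
 */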
#define CHECK_FLAG(x) BUILD_BUG_ON((u64)PERF_DLFILTER_FLAG_ ## x != (u64)PERF_IP_FLAG_ ## x)
static int dlfilter__init(struct dlfilter *d, const char *file, int dlargc, char **dlargv)
{
CHECK_FLAG(BRANCH);
CHECK_FLAG(CALL);
CHECK_FLAG(RETURN);
CHECK_FLAG(CONDITIONAL);
CHECK_FLAG(SYSCALLRET);
CHECK_FLAG(ASYNC);
CHECK_FLAG(INTERRUPT);
CHECK_FLAG(TX_ABORT);
CHECK_FLAG(TRACE_BEGIN);
CHECK_FLAG(TRACE_END);
CHECK_FLAG(IN_TX);
CHECK_FLAG(VMENTRY);
CHECK_FLAG(VMEXIT);
memset(d, 0, sizeof(*d));
d->file = find_dlfilter(file);
if (!d->file)
return -1;
d->dlargc = dlargc;
d->dlargv = dlargv;
return 0;
}
static void dlfilter__exit(struct dlfilter *d)
{
zfree(&d->file);
}
static int dlfilter__open(struct dlfilter *d)
{
d->handle = dlopen(d->file, RTLD_NOW);
if (!d->handle) {
pr_err("dlopen failed for: '%s'\n", d->file);
return -1;
}
d->start = dlsym(d->handle, "start");
d->filter_event = dlsym(d->handle, "filter_event");
d->filter_event_early = dlsym(d->handle, "filter_event_early");
d->stop = dlsym(d->handle, "stop");
d->fns = dlsym(d->handle, "perf_dlfilter_fns");
if (d->fns)
memcpy(d->fns, &perf_dlfilter_fns, sizeof(struct perf_dlfilter_fns));
return 0;
}
static int dlfilter__close(struct dlfilter *d)
{
return dlclose(d->handle);
}
struct dlfilter *dlfilter__new(const char *file, int dlargc, char **dlargv)
{
struct dlfilter *d = malloc(sizeof(*d));
if (!d)
return NULL;
if (dlfilter__init(d, file, dlargc, dlargv))
goto err_free;
if (dlfilter__open(d))
goto err_exit;
return d;
err_exit:
dlfilter__exit(d);
err_free:
free(d);
return NULL;
}
static void dlfilter__free(struct dlfilter *d)
{
if (d) {
dlfilter__exit(d);
free(d);
}
}
int dlfilter__start(struct dlfilter *d, struct perf_session *session)
{
if (d) {
d->session = session;
if (d->start) {
int ret;
d->in_start = true;
ret = d->start(&d->data, d);
d->in_start = false;
return ret;
}
}
return 0;
}
static int dlfilter__stop(struct dlfilter *d)
{
if (d && d->stop) {
int ret;
d->in_stop = true;
ret = d->stop(d->data, d);
d->in_stop = false;
return ret;
}
return 0;
}
void dlfilter__cleanup(struct dlfilter *d)
{
if (d) {
dlfilter__stop(d);
dlfilter__close(d);
dlfilter__free(d);
}
}
#define ASSIGN(x) d_sample.x = sample->x
int dlfilter__do_filter_event(struct dlfilter *d,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine,
struct addr_location *al,
struct addr_location *addr_al,
bool early)
{
struct perf_dlfilter_sample d_sample;
struct perf_dlfilter_al d_ip_al;
struct perf_dlfilter_al d_addr_al;
int ret;
d->event = event;
d->sample = sample;
d->evsel = evsel;
d->machine = machine;
d->al = al;
d->addr_al = addr_al;
d->d_sample = &d_sample;
d->d_ip_al = &d_ip_al;
d->d_addr_al = &d_addr_al;
d_sample.size = sizeof(d_sample);
d_ip_al.size = 0; /* To indicate d_ip_al is not initialized */
d_addr_al.size = 0; /* To indicate d_addr_al is not initialized */
ASSIGN(ip);
ASSIGN(pid);
ASSIGN(tid);
ASSIGN(time);
ASSIGN(addr);
ASSIGN(id);
ASSIGN(stream_id);
ASSIGN(period);
ASSIGN(weight);
ASSIGN(ins_lat);
ASSIGN(p_stage_cyc);
ASSIGN(transaction);
ASSIGN(insn_cnt);
ASSIGN(cyc_cnt);
ASSIGN(cpu);
ASSIGN(flags);
ASSIGN(data_src);
ASSIGN(phys_addr);
ASSIGN(data_page_size);
ASSIGN(code_page_size);
ASSIGN(cgroup);
ASSIGN(cpumode);
ASSIGN(misc);
ASSIGN(raw_size);
ASSIGN(raw_data);
ASSIGN(machine_pid);
ASSIGN(vcpu);
if (sample->branch_stack) {
d_sample.brstack_nr = sample->branch_stack->nr;
d_sample.brstack = (struct perf_branch_entry *)perf_sample__branch_entries(sample);
} else {
d_sample.brstack_nr = 0;
d_sample.brstack = NULL;
}
if (sample->callchain) {
d_sample.raw_callchain_nr = sample->callchain->nr;
d_sample.raw_callchain = (__u64 *)sample->callchain->ips;
} else {
d_sample.raw_callchain_nr = 0;
d_sample.raw_callchain = NULL;
}
d_sample.addr_correlates_sym =
(evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
sample_addr_correlates_sym(&evsel->core.attr);
d_sample.event = evsel__name(evsel);
d->ctx_valid = true;
if (early)
ret = d->filter_event_early(d->data, &d_sample, d);
else
ret = d->filter_event(d->data, &d_sample, d);
d->ctx_valid = false;
return ret;
}
bool get_filter_desc(const char *dirname, const char *name, char **desc,
char **long_desc)
{
char path[PATH_MAX];
void *handle;
const char *(*desc_fn)(const char **long_description);
snprintf(path, sizeof(path), "%s/%s", dirname, name);
handle = dlopen(path, RTLD_NOW);
if (!handle)
return false;
if (!(dlsym(handle, "filter_event") || dlsym(handle, "filter_event_early"))) {
dlclose(handle);
return false;
}
desc_fn = dlsym(handle, "filter_description");
if (desc_fn) {
const char *dsc;
const char *long_dsc;
dsc = desc_fn(&long_dsc);
if (dsc)
*desc = strdup(dsc);
if (long_dsc)
*long_desc = strdup(long_dsc);
}
dlclose(handle);
return true;
}
static void list_filters(const char *dirname)
{
struct dirent *entry;
DIR *dir;
dir = opendir(dirname);
if (!dir)
return;
while ((entry = readdir(dir)) != NULL) {
size_t n = strlen(entry->d_name);
char *long_desc = NULL;
char *desc = NULL;
if (entry->d_type == DT_DIR || n < 4 ||
strcmp(".so", entry->d_name + n - 3))
continue;
if (!get_filter_desc(dirname, entry->d_name, &desc, &long_desc))
continue;
printf(" %-36s %s\n", entry->d_name, desc ? desc : "");
if (verbose > 0) {
char *p = long_desc;
char *line;
while ((line = strsep(&p, "\n")) != NULL)
printf("%39s%s\n", "", line);
}
free(long_desc);
free(desc);
}
closedir(dir);
}
int list_available_dlfilters(const struct option *opt __maybe_unused,
const char *s __maybe_unused,
int unset __maybe_unused)
{
char path[PATH_MAX];
char *exec_path;
printf("List of available dlfilters:\n");
list_filters(".");
exec_path = get_argv_exec_path();
if (!exec_path)
goto out;
snprintf(path, sizeof(path), "%s/dlfilters", exec_path);
list_filters(path);
free(exec_path);
out:
exit(0);
}
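/*
* The above is exposed as "perf script --list-dlfilters": filters are looked
* for in the current directory first, then in <exec_path>/dlfilters.
*/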
| linux-master | tools/perf/util/dlfilter.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/mmap.h"
#include "util/perf_api_probe.h"
#include <perf/mmap.h>
#include <linux/perf_event.h>
#include <limits.h>
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
int evlist__add_sb_event(struct evlist *evlist, struct perf_event_attr *attr,
evsel__sb_cb_t cb, void *data)
{
struct evsel *evsel;
if (!attr->sample_id_all) {
pr_warning("enabling sample_id_all for all side band events\n");
attr->sample_id_all = 1;
}
evsel = evsel__new_idx(attr, evlist->core.nr_entries);
if (!evsel)
return -1;
evsel->side_band.cb = cb;
evsel->side_band.data = data;
evlist__add(evlist, evsel);
return 0;
}
static void *perf_evlist__poll_thread(void *arg)
{
struct evlist *evlist = arg;
bool draining = false;
int i, done = 0;
/*
* In order to read symbols from other namespaces perf needs to call
* setns(2). This isn't permitted if the struct_fs has multiple users.
* unshare(2) the fs so that we may continue to setns into namespaces
* that we're observing when, for instance, reading the build-ids at
* the end of a 'perf record' session.
*/
unshare(CLONE_FS);
while (!done) {
bool got_data = false;
if (evlist->thread.done)
draining = true;
if (!draining)
evlist__poll(evlist, 1000);
for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct mmap *map = &evlist->mmap[i];
union perf_event *event;
if (perf_mmap__read_init(&map->core))
continue;
while ((event = perf_mmap__read_event(&map->core)) != NULL) {
struct evsel *evsel = evlist__event2evsel(evlist, event);
if (evsel && evsel->side_band.cb)
evsel->side_band.cb(event, evsel->side_band.data);
else
pr_warning("cannot locate proper evsel for the side band event\n");
perf_mmap__consume(&map->core);
got_data = true;
}
perf_mmap__read_done(&map->core);
}
if (draining && !got_data)
break;
}
return NULL;
}
void evlist__set_cb(struct evlist *evlist, evsel__sb_cb_t cb, void *data)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
evsel->core.attr.sample_id_all = 1;
evsel->core.attr.watermark = 1;
evsel->core.attr.wakeup_watermark = 1;
evsel->side_band.cb = cb;
evsel->side_band.data = data;
}
}
int evlist__start_sb_thread(struct evlist *evlist, struct target *target)
{
struct evsel *counter;
if (!evlist)
return 0;
if (evlist__create_maps(evlist, target))
goto out_delete_evlist;
if (evlist->core.nr_entries > 1) {
bool can_sample_identifier = perf_can_sample_identifier();
evlist__for_each_entry(evlist, counter)
evsel__set_sample_id(counter, can_sample_identifier);
evlist__set_id_pos(evlist);
}
evlist__for_each_entry(evlist, counter) {
if (evsel__open(counter, evlist->core.user_requested_cpus,
evlist->core.threads) < 0)
goto out_delete_evlist;
}
if (evlist__mmap(evlist, UINT_MAX))
goto out_delete_evlist;
evlist__for_each_entry(evlist, counter) {
if (evsel__enable(counter))
goto out_delete_evlist;
}
evlist->thread.done = 0;
if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
goto out_delete_evlist;
return 0;
out_delete_evlist:
evlist__delete(evlist);
evlist = NULL;
return -1;
}
void evlist__stop_sb_thread(struct evlist *evlist)
{
if (!evlist)
return;
evlist->thread.done = 1;
pthread_join(evlist->thread.th, NULL);
evlist__delete(evlist);
}
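/*
* Sketch of how the side-band machinery above is typically used (error
* handling elided; assumes a populated perf_event_attr and struct target,
* real callers being e.g. perf record and perf top):
*
*	struct evlist *sb_evlist = evlist__new();
*
*	if (sb_evlist &&
*	    !evlist__add_sb_event(sb_evlist, &attr, my_cb, my_data) &&
*	    !evlist__start_sb_thread(sb_evlist, target)) {
*		... main work; my_cb() runs from the poll thread ...
*		evlist__stop_sb_thread(sb_evlist);
*	}
*
* Note that evlist__start_sb_thread() deletes the evlist itself on failure,
* so it must not be deleted again in that case.
*/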
| linux-master | tools/perf/util/sideband_evlist.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/string.h>
#include <internal/lib.h>
#include <symbol/kallsyms.h>
#include "bpf-event.h"
#include "bpf-utils.h"
#include "debug.h"
#include "dso.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"
#include "record.h"
#include "util/synthetic-events.h"
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
int ret = 0;
size_t i;
for (i = 0; i < len; i++)
ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
return ret;
}
static int machine__process_bpf_event_load(struct machine *machine,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
struct bpf_prog_info_node *info_node;
struct perf_env *env = machine->env;
struct perf_bpil *info_linear;
int id = event->bpf.id;
unsigned int i;
/* perf-record, no need to handle bpf-event */
if (env == NULL)
return 0;
info_node = perf_env__find_bpf_prog_info(env, id);
if (!info_node)
return 0;
info_linear = info_node->info_linear;
for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
u64 addr = addrs[i];
struct map *map = maps__find(machine__kernel_maps(machine), addr);
if (map) {
struct dso *dso = map__dso(map);
dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
dso->bpf_prog.id = id;
dso->bpf_prog.sub_id = i;
dso->bpf_prog.env = env;
}
}
return 0;
}
int machine__process_bpf(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
if (dump_trace)
perf_event__fprintf_bpf(event, stdout);
switch (event->bpf.type) {
case PERF_BPF_EVENT_PROG_LOAD:
return machine__process_bpf_event_load(machine, event, sample);
case PERF_BPF_EVENT_PROG_UNLOAD:
/*
* Do not free bpf_prog_info and btf of the program here,
* as annotation still need them. They will be freed at
* the end of the session.
*/
break;
default:
pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
break;
}
return 0;
}
static int perf_env__fetch_btf(struct perf_env *env,
u32 btf_id,
struct btf *btf)
{
struct btf_node *node;
u32 data_size;
const void *data;
data = btf__raw_data(btf, &data_size);
node = malloc(data_size + sizeof(struct btf_node));
if (!node)
return -1;
node->id = btf_id;
node->data_size = data_size;
memcpy(node->data, data, data_size);
if (!perf_env__insert_btf(env, node)) {
/* Insertion failed because of a duplicate. */
free(node);
return -1;
}
return 0;
}
static int synthesize_bpf_prog_name(char *buf, int size,
struct bpf_prog_info *info,
struct btf *btf,
u32 sub_id)
{
u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
void *func_infos = (void *)(uintptr_t)(info->func_info);
u32 sub_prog_cnt = info->nr_jited_ksyms;
const struct bpf_func_info *finfo;
const char *short_name = NULL;
const struct btf_type *t;
int name_len;
name_len = snprintf(buf, size, "bpf_prog_");
name_len += snprintf_hex(buf + name_len, size - name_len,
prog_tags[sub_id], BPF_TAG_SIZE);
if (btf) {
finfo = func_infos + sub_id * info->func_info_rec_size;
t = btf__type_by_id(btf, finfo->type_id);
short_name = btf__name_by_offset(btf, t->name_off);
} else if (sub_id == 0 && sub_prog_cnt == 1) {
/* no subprog */
if (info->name[0])
short_name = info->name;
} else
short_name = "F";
if (short_name)
name_len += snprintf(buf + name_len, size - name_len,
"_%s", short_name);
return name_len;
}
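/*
* Illustrative examples of the names produced above (tag bytes invented):
*
*	bpf_prog_7cc47bbf07148bfe_sys_enter	name from BTF func info
*	bpf_prog_7cc47bbf07148bfe_F		subprog without BTF
*	bpf_prog_7cc47bbf07148bfe		single unnamed program
*/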
/*
* Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
* program. One PERF_RECORD_BPF_EVENT is generated for the program. And
* one PERF_RECORD_KSYMBOL is generated for each sub program.
*
* Returns:
* 0 for success;
* -1 for failures;
* -2 for lack of kernel support.
*/
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
int fd,
union perf_event *event,
struct record_opts *opts)
{
struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
struct perf_record_bpf_event *bpf_event = &event->bpf;
struct perf_tool *tool = session->tool;
struct bpf_prog_info_node *info_node;
struct perf_bpil *info_linear;
struct bpf_prog_info *info;
struct btf *btf = NULL;
struct perf_env *env;
u32 sub_prog_cnt, i;
int err = 0;
u64 arrays;
/*
* for perf-record and perf-report use header.env;
* otherwise, use global perf_env.
*/
env = session->data ? &session->header.env : &perf_env;
arrays = 1UL << PERF_BPIL_JITED_KSYMS;
arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
arrays |= 1UL << PERF_BPIL_FUNC_INFO;
arrays |= 1UL << PERF_BPIL_PROG_TAGS;
arrays |= 1UL << PERF_BPIL_JITED_INSNS;
arrays |= 1UL << PERF_BPIL_LINE_INFO;
arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;
info_linear = get_bpf_prog_info_linear(fd, arrays);
if (IS_ERR_OR_NULL(info_linear)) {
info_linear = NULL;
pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
return -1;
}
if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
free(info_linear);
pr_debug("%s: the kernel is too old, aborting\n", __func__);
return -2;
}
info = &info_linear->info;
if (!info->jited_ksyms) {
free(info_linear);
return -1;
}
/* number of ksyms, func_lengths, and tags should match */
sub_prog_cnt = info->nr_jited_ksyms;
if (sub_prog_cnt != info->nr_prog_tags ||
sub_prog_cnt != info->nr_jited_func_lens) {
free(info_linear);
return -1;
}
/* check BTF func info support */
if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
/* btf func info number should be same as sub_prog_cnt */
if (sub_prog_cnt != info->nr_func_info) {
pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
free(info_linear);
return -1;
}
btf = btf__load_from_kernel_by_id(info->btf_id);
if (libbpf_get_error(btf)) {
pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
err = -1;
goto out;
}
perf_env__fetch_btf(env, info->btf_id, btf);
}
/* Synthesize PERF_RECORD_KSYMBOL */
for (i = 0; i < sub_prog_cnt; i++) {
__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
int name_len;
*ksymbol_event = (struct perf_record_ksymbol) {
.header = {
.type = PERF_RECORD_KSYMBOL,
.size = offsetof(struct perf_record_ksymbol, name),
},
.addr = prog_addrs[i],
.len = prog_lens[i],
.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
.flags = 0,
};
name_len = synthesize_bpf_prog_name(ksymbol_event->name,
KSYM_NAME_LEN, info, btf, i);
ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
sizeof(u64));
memset((void *)event + event->header.size, 0, machine->id_hdr_size);
event->header.size += machine->id_hdr_size;
err = perf_tool__process_synth_event(tool, event,
machine, process);
}
if (!opts->no_bpf_event) {
/* Synthesize PERF_RECORD_BPF_EVENT */
*bpf_event = (struct perf_record_bpf_event) {
.header = {
.type = PERF_RECORD_BPF_EVENT,
.size = sizeof(struct perf_record_bpf_event),
},
.type = PERF_BPF_EVENT_PROG_LOAD,
.flags = 0,
.id = info->id,
};
memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
memset((void *)event + event->header.size, 0, machine->id_hdr_size);
event->header.size += machine->id_hdr_size;
/* save bpf_prog_info to env */
info_node = malloc(sizeof(struct bpf_prog_info_node));
if (!info_node) {
err = -1;
goto out;
}
info_node->info_linear = info_linear;
perf_env__insert_bpf_prog_info(env, info_node);
info_linear = NULL;
/*
* process after saving bpf_prog_info to env, so that
* required information is ready for look up
*/
err = perf_tool__process_synth_event(tool, event,
machine, process);
}
out:
free(info_linear);
btf__free(btf);
return err ? -1 : 0;
}
struct kallsyms_parse {
union perf_event *event;
perf_event__handler_t process;
struct machine *machine;
struct perf_tool *tool;
};
static int
process_bpf_image(char *name, u64 addr, struct kallsyms_parse *data)
{
struct machine *machine = data->machine;
union perf_event *event = data->event;
struct perf_record_ksymbol *ksymbol;
int len;
ksymbol = &event->ksymbol;
*ksymbol = (struct perf_record_ksymbol) {
.header = {
.type = PERF_RECORD_KSYMBOL,
.size = offsetof(struct perf_record_ksymbol, name),
},
.addr = addr,
.len = page_size,
.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
.flags = 0,
};
len = scnprintf(ksymbol->name, KSYM_NAME_LEN, "%s", name);
ksymbol->header.size += PERF_ALIGN(len + 1, sizeof(u64));
memset((void *) event + event->header.size, 0, machine->id_hdr_size);
event->header.size += machine->id_hdr_size;
return perf_tool__process_synth_event(data->tool, event, machine,
data->process);
}
static int
kallsyms_process_symbol(void *data, const char *_name,
char type __maybe_unused, u64 start)
{
char disp[KSYM_NAME_LEN];
char *module, *name;
unsigned long id;
int err = 0;
module = strchr(_name, '\t');
if (!module)
return 0;
/* We are going after [bpf] module ... */
if (strcmp(module + 1, "[bpf]"))
return 0;
name = memdup(_name, (module - _name) + 1);
if (!name)
return -ENOMEM;
name[module - _name] = 0;
/* .. and only for trampolines and dispatchers */
if ((sscanf(name, "bpf_trampoline_%lu", &id) == 1) ||
(sscanf(name, "bpf_dispatcher_%s", disp) == 1))
err = process_bpf_image(name, start, data);
free(name);
return err;
}
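/*
* kallsyms_process_symbol() above only matches /proc/kallsyms lines such as
* the following (addresses made up), emitted for BPF trampolines and
* dispatchers:
*
*	ffffffffc0002000 t bpf_trampoline_6442466236	[bpf]
*	ffffffffc0004000 t bpf_dispatcher_xdp	[bpf]
*
* Everything else is rejected by the "[bpf]" module and name prefix checks.
*/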
int perf_event__synthesize_bpf_events(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
struct record_opts *opts)
{
const char *kallsyms_filename = "/proc/kallsyms";
struct kallsyms_parse arg;
union perf_event *event;
__u32 id = 0;
int err;
int fd;
event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
if (!event)
return -1;
/* Synthesize all the bpf programs in system. */
while (true) {
err = bpf_prog_get_next_id(id, &id);
if (err) {
if (errno == ENOENT) {
err = 0;
break;
}
pr_debug("%s: can't get next program: %s%s\n",
__func__, strerror(errno),
errno == EINVAL ? " -- kernel too old?" : "");
/* don't report error on old kernel or EPERM */
err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
break;
}
fd = bpf_prog_get_fd_by_id(id);
if (fd < 0) {
pr_debug("%s: failed to get fd for prog_id %u\n",
__func__, id);
continue;
}
err = perf_event__synthesize_one_bpf_prog(session, process,
machine, fd,
event, opts);
close(fd);
if (err) {
/* do not return error for old kernel */
if (err == -2)
err = 0;
break;
}
}
/* Synthesize all the bpf images - trampolines/dispatchers. */
if (symbol_conf.kallsyms_name != NULL)
kallsyms_filename = symbol_conf.kallsyms_name;
arg = (struct kallsyms_parse) {
.event = event,
.process = process,
.machine = machine,
.tool = session->tool,
};
if (kallsyms__parse(kallsyms_filename, &arg, kallsyms_process_symbol)) {
pr_err("%s: failed to synthesize bpf images: %s\n",
__func__, strerror(errno));
}
free(event);
return err;
}
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
struct bpf_prog_info_node *info_node;
struct perf_bpil *info_linear;
struct btf *btf = NULL;
u64 arrays;
u32 btf_id;
int fd;
fd = bpf_prog_get_fd_by_id(id);
if (fd < 0)
return;
arrays = 1UL << PERF_BPIL_JITED_KSYMS;
arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
arrays |= 1UL << PERF_BPIL_FUNC_INFO;
arrays |= 1UL << PERF_BPIL_PROG_TAGS;
arrays |= 1UL << PERF_BPIL_JITED_INSNS;
arrays |= 1UL << PERF_BPIL_LINE_INFO;
arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;
info_linear = get_bpf_prog_info_linear(fd, arrays);
if (IS_ERR_OR_NULL(info_linear)) {
pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
goto out;
}
btf_id = info_linear->info.btf_id;
info_node = malloc(sizeof(struct bpf_prog_info_node));
if (info_node) {
info_node->info_linear = info_linear;
perf_env__insert_bpf_prog_info(env, info_node);
} else
free(info_linear);
if (btf_id == 0)
goto out;
btf = btf__load_from_kernel_by_id(btf_id);
if (libbpf_get_error(btf)) {
pr_debug("%s: failed to get BTF of id %u, aborting\n",
__func__, btf_id);
goto out;
}
perf_env__fetch_btf(env, btf_id, btf);
out:
btf__free(btf);
close(fd);
}
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
struct perf_env *env = data;
if (event->header.type != PERF_RECORD_BPF_EVENT)
return -1;
switch (event->bpf.type) {
case PERF_BPF_EVENT_PROG_LOAD:
perf_env__add_bpf_info(env, event->bpf.id);
/* fall through */
case PERF_BPF_EVENT_PROG_UNLOAD:
/*
* Do not free bpf_prog_info and btf of the program here,
* as annotation still need them. They will be freed at
* the end of the session.
*/
break;
default:
pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
break;
}
return 0;
}
int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
.sample_id_all = 1,
.watermark = 1,
.bpf_event = 1,
.size = sizeof(attr), /* to capture ABI version */
};
/*
* Older gcc versions don't support designated initializers, like above,
* for unnamed union members, such as the following:
*/
attr.wakeup_watermark = 1;
return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
struct perf_env *env,
FILE *fp)
{
__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
char name[KSYM_NAME_LEN];
struct btf *btf = NULL;
u32 sub_prog_cnt, i;
sub_prog_cnt = info->nr_jited_ksyms;
if (sub_prog_cnt != info->nr_prog_tags ||
sub_prog_cnt != info->nr_jited_func_lens)
return;
if (info->btf_id) {
struct btf_node *node;
node = perf_env__find_btf(env, info->btf_id);
if (node)
btf = btf__new((__u8 *)(node->data),
node->data_size);
}
if (sub_prog_cnt == 1) {
synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
info->id, name, prog_addrs[0], prog_lens[0]);
goto out;
}
fprintf(fp, "# bpf_prog_info %u:\n", info->id);
for (i = 0; i < sub_prog_cnt; i++) {
synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);
fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
i, name, prog_addrs[i], prog_lens[i]);
}
out:
btf__free(btf);
}
| linux-master | tools/perf/util/bpf-event.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009, Steven Rostedt <[email protected]>
*/
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <traceevent/event-parse.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include "trace-event.h"
#include "debug.h"
#include "util.h"
static int input_fd;
static ssize_t trace_data_size;
static bool repipe;
static int __do_read(int fd, void *buf, int size)
{
int rsize = size;
while (size) {
int ret = read(fd, buf, size);
if (ret <= 0)
return -1;
if (repipe) {
int retw = write(STDOUT_FILENO, buf, ret);
if (retw <= 0 || retw != ret) {
pr_debug("repiping input file");
return -1;
}
}
size -= ret;
buf += ret;
}
return rsize;
}
static int do_read(void *data, int size)
{
int r;
r = __do_read(input_fd, data, size);
if (r <= 0) {
pr_debug("reading input file (size expected=%d received=%d)",
size, r);
return -1;
}
trace_data_size += r;
return r;
}
/* If it fails, the next read will report it */
static void skip(int size)
{
char buf[BUFSIZ];
int r;
while (size) {
r = size > BUFSIZ ? BUFSIZ : size;
do_read(buf, r);
size -= r;
}
}
static unsigned int read4(struct tep_handle *pevent)
{
unsigned int data;
if (do_read(&data, 4) < 0)
return 0;
return tep_read_number(pevent, &data, 4);
}
static unsigned long long read8(struct tep_handle *pevent)
{
unsigned long long data;
if (do_read(&data, 8) < 0)
return 0;
return tep_read_number(pevent, &data, 8);
}
static char *read_string(void)
{
char buf[BUFSIZ];
char *str = NULL;
int size = 0;
off_t r;
char c;
for (;;) {
r = read(input_fd, &c, 1);
if (r < 0) {
pr_debug("reading input file");
goto out;
}
if (!r) {
pr_debug("no data");
goto out;
}
if (repipe) {
int retw = write(STDOUT_FILENO, &c, 1);
if (retw <= 0 || retw != r) {
pr_debug("repiping input file string");
goto out;
}
}
buf[size++] = c;
if (!c)
break;
}
trace_data_size += size;
str = malloc(size);
if (str)
memcpy(str, buf, size);
out:
return str;
}
static int read_proc_kallsyms(struct tep_handle *pevent)
{
unsigned int size;
size = read4(pevent);
if (!size)
return 0;
/*
* Just skip it, now that we configure libtraceevent to use the
* tools/perf/ symbol resolver.
*
* We need to skip it so that we can continue parsing old perf.data
* files, that contains this /proc/kallsyms payload.
*
* Newer perf.data files will have just a zeroed 4-byte size as the
* "kallsyms payload", so that older tools can continue reading them and
* interpret that as "no kallsyms payload is present".
*/
lseek(input_fd, size, SEEK_CUR);
trace_data_size += size;
return 0;
}
static int read_ftrace_printk(struct tep_handle *pevent)
{
unsigned int size;
char *buf;
/* it can have 0 size */
size = read4(pevent);
if (!size)
return 0;
buf = malloc(size + 1);
if (buf == NULL)
return -1;
if (do_read(buf, size) < 0) {
free(buf);
return -1;
}
buf[size] = '\0';
parse_ftrace_printk(pevent, buf, size);
free(buf);
return 0;
}
static int read_header_files(struct tep_handle *pevent)
{
unsigned long long size;
char *header_page;
char buf[BUFSIZ];
int ret = 0;
if (do_read(buf, 12) < 0)
return -1;
if (memcmp(buf, "header_page", 12) != 0) {
pr_debug("did not read header page");
return -1;
}
size = read8(pevent);
header_page = malloc(size);
if (header_page == NULL)
return -1;
if (do_read(header_page, size) < 0) {
pr_debug("did not read header page");
free(header_page);
return -1;
}
if (!tep_parse_header_page(pevent, header_page, size,
tep_get_long_size(pevent))) {
/*
* The commit field in the page is of type long,
* use that instead, since it represents the kernel.
*/
tep_set_long_size(pevent, tep_get_header_page_size(pevent));
}
free(header_page);
if (do_read(buf, 13) < 0)
return -1;
if (memcmp(buf, "header_event", 13) != 0) {
pr_debug("did not read header event");
return -1;
}
size = read8(pevent);
skip(size);
return ret;
}
static int read_ftrace_file(struct tep_handle *pevent, unsigned long long size)
{
int ret;
char *buf;
buf = malloc(size);
if (buf == NULL) {
pr_debug("memory allocation failure\n");
return -1;
}
ret = do_read(buf, size);
if (ret < 0) {
pr_debug("error reading ftrace file.\n");
goto out;
}
ret = parse_ftrace_file(pevent, buf, size);
if (ret < 0)
pr_debug("error parsing ftrace file.\n");
out:
free(buf);
return ret;
}
static int read_event_file(struct tep_handle *pevent, char *sys,
unsigned long long size)
{
int ret;
char *buf;
buf = malloc(size);
if (buf == NULL) {
pr_debug("memory allocation failure\n");
return -1;
}
ret = do_read(buf, size);
if (ret < 0)
goto out;
ret = parse_event_file(pevent, buf, size, sys);
if (ret < 0)
pr_debug("error parsing event file.\n");
out:
free(buf);
return ret;
}
static int read_ftrace_files(struct tep_handle *pevent)
{
unsigned long long size;
int count;
int i;
int ret;
count = read4(pevent);
for (i = 0; i < count; i++) {
size = read8(pevent);
ret = read_ftrace_file(pevent, size);
if (ret)
return ret;
}
return 0;
}
static int read_event_files(struct tep_handle *pevent)
{
unsigned long long size;
char *sys;
int systems;
int count;
int i, x;
int ret;
systems = read4(pevent);
for (i = 0; i < systems; i++) {
sys = read_string();
if (sys == NULL)
return -1;
count = read4(pevent);
for (x = 0; x < count; x++) {
size = read8(pevent);
ret = read_event_file(pevent, sys, size);
if (ret) {
free(sys);
return ret;
}
}
free(sys);
}
return 0;
}
static int read_saved_cmdline(struct tep_handle *pevent)
{
unsigned long long size;
char *buf;
int ret;
/* it can have 0 size */
size = read8(pevent);
if (!size)
return 0;
buf = malloc(size + 1);
if (buf == NULL) {
pr_debug("memory allocation failure\n");
return -1;
}
ret = do_read(buf, size);
if (ret < 0) {
pr_debug("error reading saved cmdlines\n");
goto out;
}
buf[ret] = '\0';
parse_saved_cmdline(pevent, buf, size);
ret = 0;
out:
free(buf);
return ret;
}
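/*
* For reference, the layout of the trace metadata that trace_report() below
* consumes, reconstructed from the readers above:
*
*	magic		{ 0x17, 0x08, 0x44 } followed by "tracing"
*	version		NUL-terminated string
*	endianness	1 byte (0 = little endian, 1 = big endian)
*	long size	1 byte
*	page size	4 bytes
*	header_page and header_event blobs
*	ftrace format files, then event format files per system
*	/proc/kallsyms payload (now skipped), ftrace printk strings
*	saved cmdlines (only for version >= 0.6)
*/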
ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
{
char buf[BUFSIZ];
char test[] = { 23, 8, 68 };
char *version;
int show_version = 0;
int show_funcs = 0;
int show_printk = 0;
ssize_t size = -1;
int file_bigendian;
int host_bigendian;
int file_long_size;
int file_page_size;
struct tep_handle *pevent = NULL;
int err;
repipe = __repipe;
input_fd = fd;
if (do_read(buf, 3) < 0)
return -1;
if (memcmp(buf, test, 3) != 0) {
pr_debug("no trace data in the file");
return -1;
}
if (do_read(buf, 7) < 0)
return -1;
if (memcmp(buf, "tracing", 7) != 0) {
pr_debug("not a trace file (missing 'tracing' tag)");
return -1;
}
version = read_string();
if (version == NULL)
return -1;
if (show_version)
printf("version = %s\n", version);
if (do_read(buf, 1) < 0) {
free(version);
return -1;
}
file_bigendian = buf[0];
host_bigendian = host_is_bigendian() ? 1 : 0;
if (trace_event__init(tevent)) {
pr_debug("trace_event__init failed");
goto out;
}
pevent = tevent->pevent;
tep_set_flag(pevent, TEP_NSEC_OUTPUT);
tep_set_file_bigendian(pevent, file_bigendian);
tep_set_local_bigendian(pevent, host_bigendian);
if (do_read(buf, 1) < 0)
goto out;
file_long_size = buf[0];
file_page_size = read4(pevent);
if (!file_page_size)
goto out;
tep_set_long_size(pevent, file_long_size);
tep_set_page_size(pevent, file_page_size);
err = read_header_files(pevent);
if (err)
goto out;
err = read_ftrace_files(pevent);
if (err)
goto out;
err = read_event_files(pevent);
if (err)
goto out;
err = read_proc_kallsyms(pevent);
if (err)
goto out;
err = read_ftrace_printk(pevent);
if (err)
goto out;
if (atof(version) >= 0.6) {
err = read_saved_cmdline(pevent);
if (err)
goto out;
}
size = trace_data_size;
repipe = false;
if (show_funcs) {
tep_print_funcs(pevent);
} else if (show_printk) {
tep_print_printk(pevent);
}
pevent = NULL;
out:
if (pevent)
trace_event__cleanup(tevent);
free(version);
return size;
}
| linux-master | tools/perf/util/trace-event-read.c |
// SPDX-License-Identifier: GPL-2.0
#include "term.h"
#include <stdlib.h>
#include <termios.h>
#include <unistd.h>
#include <sys/ioctl.h>
void get_term_dimensions(struct winsize *ws)
{
char *s = getenv("LINES");
if (s != NULL) {
ws->ws_row = atoi(s);
s = getenv("COLUMNS");
if (s != NULL) {
ws->ws_col = atoi(s);
if (ws->ws_row && ws->ws_col)
return;
}
}
#ifdef TIOCGWINSZ
if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
ws->ws_row && ws->ws_col)
return;
#endif
ws->ws_row = 25;
ws->ws_col = 80;
}
void set_term_quiet_input(struct termios *old)
{
struct termios tc;
tcgetattr(0, old);
tc = *old;
tc.c_lflag &= ~(ICANON | ECHO);
tc.c_cc[VMIN] = 0;
tc.c_cc[VTIME] = 0;
tcsetattr(0, TCSANOW, &tc);
}
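/*
* set_term_quiet_input() saves the previous settings in *old; callers are
* expected to restore them themselves, roughly (a sketch, not a helper
* provided here):
*
*	struct termios save;
*
*	set_term_quiet_input(&save);
*	... read single keystrokes without echo ...
*	tcsetattr(0, TCSAFLUSH, &save);
*/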
| linux-master | tools/perf/util/term.c |
// SPDX-License-Identifier: GPL-2.0
#include <subcmd/parse-options.h>
#include <stdio.h>
#include <time.h>
#include <strings.h>
#include <linux/time64.h>
#include "debug.h"
#include "clockid.h"
#include "record.h"
struct clockid_map {
const char *name;
int clockid;
};
#define CLOCKID_MAP(n, c) \
{ .name = n, .clockid = (c), }
#define CLOCKID_END { .name = NULL, }
/*
* Add the missing ones, we need to build on many distros...
*/
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif
static const struct clockid_map clockids[] = {
/* available for all events, NMI safe */
CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
/* available for some events */
CLOCKID_MAP("realtime", CLOCK_REALTIME),
CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
CLOCKID_MAP("tai", CLOCK_TAI),
/* available for the lazy */
CLOCKID_MAP("mono", CLOCK_MONOTONIC),
CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
CLOCKID_MAP("real", CLOCK_REALTIME),
CLOCKID_MAP("boot", CLOCK_BOOTTIME),
CLOCKID_END,
};
static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
{
struct timespec res;
*res_ns = 0;
if (!clock_getres(clk_id, &res))
*res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
else
pr_warning("WARNING: Failed to determine specified clock resolution.\n");
return 0;
}
int parse_clockid(const struct option *opt, const char *str, int unset)
{
struct record_opts *opts = (struct record_opts *)opt->value;
const struct clockid_map *cm;
const char *ostr = str;
if (unset) {
opts->use_clockid = 0;
return 0;
}
/* no arg passed */
if (!str)
return 0;
/* no setting it twice */
if (opts->use_clockid)
return -1;
opts->use_clockid = true;
/* if it's a number, we're done */
if (sscanf(str, "%d", &opts->clockid) == 1)
return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
/* allow a "CLOCK_" prefix to the name */
if (!strncasecmp(str, "CLOCK_", 6))
str += 6;
for (cm = clockids; cm->name; cm++) {
if (!strcasecmp(str, cm->name)) {
opts->clockid = cm->clockid;
return get_clockid_res(opts->clockid,
&opts->clockid_res_ns);
}
}
opts->use_clockid = false;
ui__warning("unknown clockid %s, check man page\n", ostr);
return -1;
}
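/*
* Examples of spellings parse_clockid() accepts, per the table and parsing
* above; a raw numeric clockid also works:
*
*	perf record --clockid mono ...
*	perf record --clockid CLOCK_MONOTONIC_RAW ...
*	perf record --clockid 4 ...
*/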
const char *clockid_name(clockid_t clk_id)
{
const struct clockid_map *cm;
for (cm = clockids; cm->name; cm++) {
if (cm->clockid == clk_id)
return cm->name;
}
return "(not found)";
}
| linux-master | tools/perf/util/clockid.c |
// SPDX-License-Identifier: LGPL-2.1
#include "namespaces.h"
#include <unistd.h>
#include <sys/syscall.h>
int setns(int fd, int nstype)
{
return syscall(__NR_setns, fd, nstype);
}
| linux-master | tools/perf/util/setns.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <string.h>
#include <linux/string.h>
#include "evlist.h"
#include "env.h"
#include "header.h"
#include "sample-raw.h"
/*
* Check platform the perf data file was created on and perform platform
* specific interpretation.
*/
void evlist__init_trace_event_sample_raw(struct evlist *evlist)
{
const char *arch_pf = perf_env__arch(evlist->env);
const char *cpuid = perf_env__cpuid(evlist->env);
if (arch_pf && !strcmp("s390", arch_pf))
evlist->trace_event_sample_raw = evlist__s390_sample_raw;
else if (arch_pf && !strcmp("x86", arch_pf) &&
cpuid && strstarts(cpuid, "AuthenticAMD") &&
evlist__has_amd_ibs(evlist)) {
evlist->trace_event_sample_raw = evlist__amd_sample_raw;
}
}
| linux-master | tools/perf/util/sample-raw.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/ctype.h>
#include <sys/types.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdio.h>
#include <stdbool.h>
#include <dirent.h>
#include <api/fs/fs.h>
#include <locale.h>
#include <fnmatch.h>
#include <math.h>
#include "debug.h"
#include "evsel.h"
#include "pmu.h"
#include "pmus.h"
#include <util/pmu-bison.h>
#include <util/pmu-flex.h>
#include "parse-events.h"
#include "print-events.h"
#include "header.h"
#include "string2.h"
#include "strbuf.h"
#include "fncache.h"
#include "util/evsel_config.h"
struct perf_pmu perf_pmu__fake = {
.name = "fake",
};
#define UNIT_MAX_LEN 31 /* max length for event unit name */
/**
* struct perf_pmu_alias - An event either read from sysfs or builtin in
* pmu-events.c, created by parsing the pmu-events json files.
*/
struct perf_pmu_alias {
/** @name: Name of the event like "mem-loads". */
char *name;
/** @desc: Optional short description of the event. */
char *desc;
/** @long_desc: Optional long description. */
char *long_desc;
/**
* @topic: Optional topic such as cache or pipeline, particularly for
* json events.
*/
char *topic;
/** @terms: Owned list of the original parsed parameters. */
struct list_head terms;
/** @list: List element of struct perf_pmu aliases. */
struct list_head list;
/**
* @pmu_name: The name copied from the json struct pmu_event. This can
* differ from the PMU name as it won't have suffixes.
*/
char *pmu_name;
/** @unit: Units for the event, such as bytes or cache lines. */
char unit[UNIT_MAX_LEN+1];
/** @scale: Value to scale read counter values by. */
double scale;
/**
* @per_pkg: Does the file
* <sysfs>/bus/event_source/devices/<pmu_name>/events/<name>.per-pkg or
* equivalent json value exist and have the value 1.
*/
bool per_pkg;
/**
* @snapshot: Does the file
* <sysfs>/bus/event_source/devices/<pmu_name>/events/<name>.snapshot
* exist and have the value 1.
*/
bool snapshot;
/**
* @deprecated: Is the event hidden and so not shown in perf list by
* default.
*/
bool deprecated;
/** @from_sysfs: Was the alias from sysfs or a json event? */
bool from_sysfs;
/** @info_loaded: Have the scale, unit and other values been read from disk? */
bool info_loaded;
};
/**
* struct perf_pmu_format - Values from a format file read from
* <sysfs>/devices/cpu/format/ held in struct perf_pmu.
*
* For example, the contents of <sysfs>/devices/cpu/format/event may be
* "config:0-7" and will be represented here as name="event",
* value=PERF_PMU_FORMAT_VALUE_CONFIG and bits 0 to 7 will be set.
*/
struct perf_pmu_format {
/** @list: Element on list within struct perf_pmu. */
struct list_head list;
/** @bits: Which config bits are set by this format value. */
DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
/** @name: The modifier/file name. */
char *name;
/**
* @value : Which config value the format relates to. Supported values
* are from PERF_PMU_FORMAT_VALUE_CONFIG to
* PERF_PMU_FORMAT_VALUE_CONFIG_END.
*/
u16 value;
/** @loaded: Has the contents been loaded/parsed. */
bool loaded;
};
static int pmu_aliases_parse(struct perf_pmu *pmu);
static struct perf_pmu_format *perf_pmu__new_format(struct list_head *list, char *name)
{
struct perf_pmu_format *format;
format = zalloc(sizeof(*format));
if (!format)
return NULL;
format->name = strdup(name);
if (!format->name) {
free(format);
return NULL;
}
list_add_tail(&format->list, list);
return format;
}
/* Called at the end of parsing a format. */
void perf_pmu_format__set_value(void *vformat, int config, unsigned long *bits)
{
struct perf_pmu_format *format = vformat;
format->value = config;
memcpy(format->bits, bits, sizeof(format->bits));
}
static void __perf_pmu_format__load(struct perf_pmu_format *format, FILE *file)
{
void *scanner;
int ret;
ret = perf_pmu_lex_init(&scanner);
if (ret)
return;
perf_pmu_set_in(file, scanner);
ret = perf_pmu_parse(format, scanner);
perf_pmu_lex_destroy(scanner);
format->loaded = true;
}
static void perf_pmu_format__load(struct perf_pmu *pmu, struct perf_pmu_format *format)
{
char path[PATH_MAX];
FILE *file = NULL;
if (format->loaded)
return;
if (!perf_pmu__pathname_scnprintf(path, sizeof(path), pmu->name, "format"))
return;
assert(strlen(path) + strlen(format->name) + 2 < sizeof(path));
strcat(path, "/");
strcat(path, format->name);
file = fopen(path, "r");
if (!file)
return;
__perf_pmu_format__load(format, file);
fclose(file);
}
/*
* Parse & process all the sysfs attributes located under
* the directory specified in 'dir' parameter.
*/
int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load)
{
struct dirent *evt_ent;
DIR *format_dir;
int ret = 0;
format_dir = fdopendir(dirfd);
if (!format_dir)
return -EINVAL;
while ((evt_ent = readdir(format_dir)) != NULL) {
struct perf_pmu_format *format;
char *name = evt_ent->d_name;
if (!strcmp(name, ".") || !strcmp(name, ".."))
continue;
format = perf_pmu__new_format(&pmu->format, name);
if (!format) {
ret = -ENOMEM;
break;
}
if (eager_load) {
FILE *file;
int fd = openat(dirfd, name, O_RDONLY);
if (fd < 0) {
ret = -errno;
break;
}
file = fdopen(fd, "r");
if (!file) {
close(fd);
break;
}
__perf_pmu_format__load(format, file);
fclose(file);
}
}
closedir(format_dir);
return ret;
}
/*
* Reading/parsing the default pmu format definition, which should be
* located at:
* /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
*/
static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name)
{
int fd;
fd = perf_pmu__pathname_fd(dirfd, name, "format", O_DIRECTORY);
if (fd < 0)
return 0;
/* it'll close the fd */
if (perf_pmu__format_parse(pmu, fd, /*eager_load=*/false))
return -1;
return 0;
}
int perf_pmu__convert_scale(const char *scale, char **end, double *sval)
{
char *lc;
int ret = 0;
/*
* save current locale
*/
lc = setlocale(LC_NUMERIC, NULL);
/*
* The lc string may be allocated in static storage,
* so get a dynamic copy to make it survive setlocale
* call below.
*/
lc = strdup(lc);
if (!lc) {
ret = -ENOMEM;
goto out;
}
/*
* force to C locale to ensure kernel
* scale string is converted correctly.
* kernel uses default C locale.
*/
setlocale(LC_NUMERIC, "C");
*sval = strtod(scale, end);
out:
/* restore locale */
setlocale(LC_NUMERIC, lc);
free(lc);
return ret;
}
static int perf_pmu__parse_scale(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
struct stat st;
ssize_t sret;
size_t len;
char scale[128];
int fd, ret = -1;
char path[PATH_MAX];
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
scnprintf(path + len, sizeof(path) - len, "%s/%s.scale", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
return -1;
if (fstat(fd, &st) < 0)
goto error;
sret = read(fd, scale, sizeof(scale)-1);
if (sret < 0)
goto error;
if (scale[sret - 1] == '\n')
scale[sret - 1] = '\0';
else
scale[sret] = '\0';
ret = perf_pmu__convert_scale(scale, NULL, &alias->scale);
error:
close(fd);
return ret;
}
static int perf_pmu__parse_unit(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
char path[PATH_MAX];
size_t len;
ssize_t sret;
int fd;
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
scnprintf(path + len, sizeof(path) - len, "%s/%s.unit", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
return -1;
sret = read(fd, alias->unit, UNIT_MAX_LEN);
if (sret < 0)
goto error;
close(fd);
if (alias->unit[sret - 1] == '\n')
alias->unit[sret - 1] = '\0';
else
alias->unit[sret] = '\0';
return 0;
error:
close(fd);
alias->unit[0] = '\0';
return -1;
}
static int
perf_pmu__parse_per_pkg(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
char path[PATH_MAX];
size_t len;
int fd;
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
scnprintf(path + len, sizeof(path) - len, "%s/%s.per-pkg", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
return -1;
close(fd);
alias->per_pkg = true;
return 0;
}
static int perf_pmu__parse_snapshot(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
char path[PATH_MAX];
size_t len;
int fd;
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
scnprintf(path + len, sizeof(path) - len, "%s/%s.snapshot", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
return -1;
alias->snapshot = true;
close(fd);
return 0;
}
/* Delete an alias entry. */
static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
{
zfree(&newalias->name);
zfree(&newalias->desc);
zfree(&newalias->long_desc);
zfree(&newalias->topic);
zfree(&newalias->pmu_name);
parse_events_terms__purge(&newalias->terms);
free(newalias);
}
static void perf_pmu__del_aliases(struct perf_pmu *pmu)
{
struct perf_pmu_alias *alias, *tmp;
list_for_each_entry_safe(alias, tmp, &pmu->aliases, list) {
list_del(&alias->list);
perf_pmu_free_alias(alias);
}
}
static struct perf_pmu_alias *perf_pmu__find_alias(struct perf_pmu *pmu,
const char *name,
bool load)
{
struct perf_pmu_alias *alias;
if (load && !pmu->sysfs_aliases_loaded)
pmu_aliases_parse(pmu);
list_for_each_entry(alias, &pmu->aliases, list) {
if (!strcasecmp(alias->name, name))
return alias;
}
return NULL;
}
static bool assign_str(const char *name, const char *field, char **old_str,
const char *new_str)
{
if (!*old_str && new_str) {
*old_str = strdup(new_str);
return true;
}
if (!new_str || !strcasecmp(*old_str, new_str))
return false; /* Nothing to update. */
pr_debug("alias %s differs in field '%s' ('%s' != '%s')\n",
name, field, *old_str, new_str);
zfree(old_str);
*old_str = strdup(new_str);
return true;
}
static void read_alias_info(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
if (!alias->from_sysfs || alias->info_loaded)
return;
/*
* load unit name and scale if available
*/
perf_pmu__parse_unit(pmu, alias);
perf_pmu__parse_scale(pmu, alias);
perf_pmu__parse_per_pkg(pmu, alias);
perf_pmu__parse_snapshot(pmu, alias);
}
struct update_alias_data {
struct perf_pmu *pmu;
struct perf_pmu_alias *alias;
};
static int update_alias(const struct pmu_event *pe,
const struct pmu_events_table *table __maybe_unused,
void *vdata)
{
struct update_alias_data *data = vdata;
int ret = 0;
read_alias_info(data->pmu, data->alias);
assign_str(pe->name, "desc", &data->alias->desc, pe->desc);
assign_str(pe->name, "long_desc", &data->alias->long_desc, pe->long_desc);
assign_str(pe->name, "topic", &data->alias->topic, pe->topic);
data->alias->per_pkg = pe->perpkg;
if (pe->event) {
parse_events_terms__purge(&data->alias->terms);
ret = parse_events_terms(&data->alias->terms, pe->event, /*input=*/NULL);
}
if (!ret && pe->unit) {
char *unit;
ret = perf_pmu__convert_scale(pe->unit, &unit, &data->alias->scale);
if (!ret)
snprintf(data->alias->unit, sizeof(data->alias->unit), "%s", unit);
}
return ret;
}
static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
const char *desc, const char *val, FILE *val_fd,
const struct pmu_event *pe)
{
struct perf_pmu_alias *alias;
int ret;
const char *long_desc = NULL, *topic = NULL, *unit = NULL, *pmu_name = NULL;
bool deprecated = false, perpkg = false;
if (perf_pmu__find_alias(pmu, name, /*load=*/ false)) {
/* Alias was already created/loaded. */
return 0;
}
if (pe) {
long_desc = pe->long_desc;
topic = pe->topic;
unit = pe->unit;
perpkg = pe->perpkg;
deprecated = pe->deprecated;
pmu_name = pe->pmu;
}
alias = malloc(sizeof(*alias));
if (!alias)
return -ENOMEM;
INIT_LIST_HEAD(&alias->terms);
alias->scale = 1.0;
alias->unit[0] = '\0';
alias->per_pkg = perpkg;
alias->snapshot = false;
alias->deprecated = deprecated;
ret = parse_events_terms(&alias->terms, val, val_fd);
if (ret) {
pr_err("Cannot parse alias %s: %d\n", val, ret);
free(alias);
return ret;
}
alias->name = strdup(name);
alias->desc = desc ? strdup(desc) : NULL;
alias->long_desc = long_desc ? strdup(long_desc) :
desc ? strdup(desc) : NULL;
alias->topic = topic ? strdup(topic) : NULL;
alias->pmu_name = pmu_name ? strdup(pmu_name) : NULL;
if (unit) {
if (perf_pmu__convert_scale(unit, (char **)&unit, &alias->scale) < 0) {
perf_pmu_free_alias(alias);
return -1;
}
snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
}
if (!pe) {
/* Update an event from sysfs with json data. */
struct update_alias_data data = {
.pmu = pmu,
.alias = alias,
};
alias->from_sysfs = true;
if (pmu->events_table) {
if (pmu_events_table__find_event(pmu->events_table, pmu, name,
update_alias, &data) == 0)
pmu->loaded_json_aliases++;
}
}
if (!pe)
pmu->sysfs_aliases++;
else
pmu->loaded_json_aliases++;
list_add_tail(&alias->list, &pmu->aliases);
return 0;
}
static inline bool pmu_alias_info_file(char *name)
{
size_t len;
len = strlen(name);
if (len > 5 && !strcmp(name + len - 5, ".unit"))
return true;
if (len > 6 && !strcmp(name + len - 6, ".scale"))
return true;
if (len > 8 && !strcmp(name + len - 8, ".per-pkg"))
return true;
if (len > 9 && !strcmp(name + len - 9, ".snapshot"))
return true;
return false;
}
/*
* Reading the pmu event aliases definition, which should be located at:
* /sys/bus/event_source/devices/<dev>/events as sysfs group attributes.
*/
static int pmu_aliases_parse(struct perf_pmu *pmu)
{
char path[PATH_MAX];
struct dirent *evt_ent;
DIR *event_dir;
size_t len;
int fd, dir_fd;
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
scnprintf(path + len, sizeof(path) - len, "%s/events", pmu->name);
dir_fd = open(path, O_DIRECTORY);
if (dir_fd == -1) {
pmu->sysfs_aliases_loaded = true;
return 0;
}
event_dir = fdopendir(dir_fd);
if (!event_dir) {
close(dir_fd);
return -EINVAL;
}
while ((evt_ent = readdir(event_dir))) {
char *name = evt_ent->d_name;
FILE *file;
if (!strcmp(name, ".") || !strcmp(name, ".."))
continue;
/*
* skip the .unit/.scale/.per-pkg/.snapshot info files; they are
* read lazily by read_alias_info()
*/
if (pmu_alias_info_file(name))
continue;
fd = openat(dir_fd, name, O_RDONLY);
if (fd == -1) {
pr_debug("Cannot open %s\n", name);
continue;
}
file = fdopen(fd, "r");
if (!file) {
close(fd);
continue;
}
if (perf_pmu__new_alias(pmu, name, /*desc=*/ NULL,
/*val=*/ NULL, file, /*pe=*/ NULL) < 0)
pr_debug("Cannot set up %s\n", name);
fclose(file);
}
closedir(event_dir);
close(dir_fd);
pmu->sysfs_aliases_loaded = true;
return 0;
}
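/*
* Example sysfs layout consumed by pmu_aliases_parse(), here for the RAPL
* "power" PMU (contents vary by hardware):
*
*	/sys/bus/event_source/devices/power/events/energy-pkg:		event=0x02
*	/sys/bus/event_source/devices/power/events/energy-pkg.scale:	2.3283064365386962890625e-10
*	/sys/bus/event_source/devices/power/events/energy-pkg.unit:	Joules
*
* The ".scale", ".unit", ".per-pkg" and ".snapshot" files are the info files
* skipped here and read lazily via read_alias_info().
*/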
static int pmu_alias_terms(struct perf_pmu_alias *alias,
struct list_head *terms)
{
struct parse_events_term *term, *cloned;
LIST_HEAD(list);
int ret;
list_for_each_entry(term, &alias->terms, list) {
ret = parse_events_term__clone(&cloned, term);
if (ret) {
parse_events_terms__purge(&list);
return ret;
}
/*
* Weak terms don't override command line options,
* which we don't want for implicit terms in aliases.
*/
cloned->weak = true;
list_add_tail(&cloned->list, &list);
}
list_splice(&list, terms);
return 0;
}
/*
* Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
* may have a "cpus" file.
*/
static struct perf_cpu_map *pmu_cpumask(int dirfd, const char *name, bool is_core)
{
struct perf_cpu_map *cpus;
const char *templates[] = {
"cpumask",
"cpus",
NULL
};
const char **template;
char pmu_name[PATH_MAX];
struct perf_pmu pmu = {.name = pmu_name};
FILE *file;
strlcpy(pmu_name, name, sizeof(pmu_name));
for (template = templates; *template; template++) {
file = perf_pmu__open_file_at(&pmu, dirfd, *template);
if (!file)
continue;
cpus = perf_cpu_map__read(file);
fclose(file);
if (cpus)
return cpus;
}
/* Nothing found, for core PMUs assume this means all CPUs. */
return is_core ? perf_cpu_map__get(cpu_map__online()) : NULL;
}
static bool pmu_is_uncore(int dirfd, const char *name)
{
int fd;
fd = perf_pmu__pathname_fd(dirfd, name, "cpumask", O_PATH);
if (fd < 0)
return false;
close(fd);
return true;
}
static char *pmu_id(const char *name)
{
char path[PATH_MAX], *str;
size_t len;
perf_pmu__pathname_scnprintf(path, sizeof(path), name, "identifier");
if (filename__read_str(path, &str, &len) < 0)
return NULL;
str[len - 1] = 0; /* remove line feed */
return str;
}
/**
* is_sysfs_pmu_core() - Core PMU devices may have a name other than "cpu"
* in sysfs on some platforms, such as ARM or Intel hybrid. Look for a
* possible "cpus" file in sysfs to identify whether this is a core
* device.
* @name: The PMU name such as "cpu_atom".
*/
static int is_sysfs_pmu_core(const char *name)
{
char path[PATH_MAX];
if (!perf_pmu__pathname_scnprintf(path, sizeof(path), name, "cpus"))
return 0;
return file_available(path);
}
char *perf_pmu__getcpuid(struct perf_pmu *pmu)
{
char *cpuid;
static bool printed;
cpuid = getenv("PERF_CPUID");
if (cpuid)
cpuid = strdup(cpuid);
if (!cpuid)
cpuid = get_cpuid_str(pmu);
if (!cpuid)
return NULL;
if (!printed) {
pr_debug("Using CPUID %s\n", cpuid);
printed = true;
}
return cpuid;
}
__weak const struct pmu_events_table *pmu_events_table__find(void)
{
return perf_pmu__find_events_table(NULL);
}
__weak const struct pmu_metrics_table *pmu_metrics_table__find(void)
{
return perf_pmu__find_metrics_table(NULL);
}
/**
* perf_pmu__match_ignoring_suffix - Does the pmu_name match tok ignoring any
* trailing suffix? The suffix must be of the form
* tok_{digits} or tok{digits}.
* @pmu_name: The pmu_name with possible suffix.
* @tok: The possible match to pmu_name without suffix.
*/
static bool perf_pmu__match_ignoring_suffix(const char *pmu_name, const char *tok)
{
const char *p;
if (strncmp(pmu_name, tok, strlen(tok)))
return false;
p = pmu_name + strlen(tok);
if (*p == 0)
return true;
if (*p == '_')
++p;
/* Ensure we end in a number */
while (1) {
if (!isdigit(*p))
return false;
if (*(++p) == 0)
break;
}
return true;
}
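/*
* For example, with tok "uncore_cha", perf_pmu__match_ignoring_suffix()
* matches the pmu_names "uncore_cha", "uncore_cha_2" and "uncore_cha12",
* but not "uncore_cha_x" or "uncore_chax".
*/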
/**
* pmu_uncore_alias_match - does name match the PMU name?
* @pmu_name: the json struct pmu_event name. This may lack a suffix (which
* matches) or be of the form "socket,pmuname" which will match
* "socketX_pmunameY".
* @name: a real full PMU name as from sysfs.
*/
static bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
{
char *tmp = NULL, *tok, *str;
bool res;
if (strchr(pmu_name, ',') == NULL)
return perf_pmu__match_ignoring_suffix(name, pmu_name);
str = strdup(pmu_name);
if (!str)
return false;
/*
* uncore alias may be from different PMU with common prefix
*/
tok = strtok_r(str, ",", &tmp);
if (strncmp(pmu_name, tok, strlen(tok))) {
res = false;
goto out;
}
/*
* Match more complex aliases where the alias name is a comma-delimited
* list of tokens, orderly contained in the matching PMU name.
*
* Example: For alias "socket,pmuname" and PMU "socketX_pmunameY", we
* match "socket" in "socketX_pmunameY" and then "pmuname" in
* "pmunameY".
*/
while (1) {
char *next_tok = strtok_r(NULL, ",", &tmp);
name = strstr(name, tok);
if (!name ||
(!next_tok && !perf_pmu__match_ignoring_suffix(name, tok))) {
res = false;
goto out;
}
if (!next_tok)
break;
tok = next_tok;
name += strlen(tok);
}
res = true;
out:
free(str);
return res;
}
static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
const struct pmu_events_table *table __maybe_unused,
void *vdata)
{
struct perf_pmu *pmu = vdata;
perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, pe);
return 0;
}
/*
* From the pmu_events_table, find the events that correspond to the given
* PMU and add them to the list 'head'.
*/
void pmu_add_cpu_aliases_table(struct perf_pmu *pmu, const struct pmu_events_table *table)
{
pmu_events_table__for_each_event(table, pmu, pmu_add_cpu_aliases_map_callback, pmu);
}
static void pmu_add_cpu_aliases(struct perf_pmu *pmu)
{
if (!pmu->events_table)
return;
if (pmu->cpu_aliases_added)
return;
pmu_add_cpu_aliases_table(pmu, pmu->events_table);
pmu->cpu_aliases_added = true;
}
static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe,
const struct pmu_events_table *table __maybe_unused,
void *vdata)
{
struct perf_pmu *pmu = vdata;
if (!pe->compat || !pe->pmu)
return 0;
if (!strcmp(pmu->id, pe->compat) &&
pmu_uncore_alias_match(pe->pmu, pmu->name)) {
perf_pmu__new_alias(pmu,
pe->name,
pe->desc,
pe->event,
/*val_fd=*/ NULL,
pe);
}
return 0;
}
void pmu_add_sys_aliases(struct perf_pmu *pmu)
{
if (!pmu->id)
return;
pmu_for_each_sys_event(pmu_add_sys_aliases_iter_fn, pmu);
}
struct perf_event_attr * __weak
perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
{
return NULL;
}
const char * __weak
pmu_find_real_name(const char *name)
{
return name;
}
const char * __weak
pmu_find_alias_name(const char *name __maybe_unused)
{
return NULL;
}
static int pmu_max_precise(int dirfd, struct perf_pmu *pmu)
{
int max_precise = -1;
perf_pmu__scan_file_at(pmu, dirfd, "caps/max_precise", "%d", &max_precise);
return max_precise;
}
struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name)
{
struct perf_pmu *pmu;
__u32 type;
const char *name = pmu_find_real_name(lookup_name);
const char *alias_name;
pmu = zalloc(sizeof(*pmu));
if (!pmu)
return NULL;
pmu->name = strdup(name);
if (!pmu->name)
goto err;
/*
* Read type early to fail fast if a lookup name isn't a PMU. Ensure
* that type value is successfully assigned (return 1).
*/
if (perf_pmu__scan_file_at(pmu, dirfd, "type", "%u", &type) != 1)
goto err;
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
INIT_LIST_HEAD(&pmu->caps);
/*
* The pmu data we store & need consists of the pmu
* type value and format definitions. Load both right
* now.
*/
if (pmu_format(pmu, dirfd, name)) {
free(pmu);
return NULL;
}
pmu->is_core = is_pmu_core(name);
pmu->cpus = pmu_cpumask(dirfd, name, pmu->is_core);
alias_name = pmu_find_alias_name(name);
if (alias_name) {
pmu->alias_name = strdup(alias_name);
if (!pmu->alias_name)
goto err;
}
pmu->type = type;
pmu->is_uncore = pmu_is_uncore(dirfd, name);
if (pmu->is_uncore)
pmu->id = pmu_id(name);
pmu->max_precise = pmu_max_precise(dirfd, pmu);
pmu->events_table = perf_pmu__find_events_table(pmu);
pmu_add_sys_aliases(pmu);
list_add_tail(&pmu->list, pmus);
pmu->default_config = perf_pmu__get_default_config(pmu);
return pmu;
err:
zfree(&pmu->name);
free(pmu);
return NULL;
}
/* Creates the PMU when sysfs scanning fails. */
struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus)
{
struct perf_pmu *pmu = zalloc(sizeof(*pmu));
if (!pmu)
return NULL;
pmu->name = strdup("cpu");
if (!pmu->name) {
free(pmu);
return NULL;
}
pmu->is_core = true;
pmu->type = PERF_TYPE_RAW;
pmu->cpus = cpu_map__online();
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
INIT_LIST_HEAD(&pmu->caps);
list_add_tail(&pmu->list, core_pmus);
return pmu;
}
void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
{
struct perf_pmu_format *format;
if (pmu->formats_checked)
return;
pmu->formats_checked = true;
/* fake pmu doesn't have format list */
if (pmu == &perf_pmu__fake)
return;
list_for_each_entry(format, &pmu->format, list) {
perf_pmu_format__load(pmu, format);
if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END) {
pr_warning("WARNING: '%s' format '%s' requires 'perf_event_attr::config%d'"
"which is not supported by this version of perf!\n",
pmu->name, format->name, format->value);
return;
}
}
}
bool evsel__is_aux_event(const struct evsel *evsel)
{
struct perf_pmu *pmu = evsel__find_pmu(evsel);
return pmu && pmu->auxtrace;
}
/*
* Set @config_name to @val as long as the user hasn't already set or cleared it
* by passing a config term on the command line.
*
* @val is the value to put into the bits specified by @config_name rather than
* the bit pattern. It is shifted into position by this function, so to set
* something to true, pass 1 for val rather than a pre-shifted value.
*/
#define field_prep(_mask, _val) (((_val) << (ffsll(_mask) - 1)) & (_mask))
void evsel__set_config_if_unset(struct perf_pmu *pmu, struct evsel *evsel,
const char *config_name, u64 val)
{
u64 user_bits = 0, bits;
struct evsel_config_term *term = evsel__get_config_term(evsel, CFG_CHG);
if (term)
user_bits = term->val.cfg_chg;
bits = perf_pmu__format_bits(pmu, config_name);
/* Do nothing if the user changed the value */
if (bits & user_bits)
return;
/* Otherwise replace it */
evsel->core.attr.config &= ~bits;
evsel->core.attr.config |= field_prep(bits, val);
}
static struct perf_pmu_format *
pmu_find_format(struct list_head *formats, const char *name)
{
struct perf_pmu_format *format;
list_for_each_entry(format, formats, list)
if (!strcmp(format->name, name))
return format;
return NULL;
}
__u64 perf_pmu__format_bits(struct perf_pmu *pmu, const char *name)
{
struct perf_pmu_format *format = pmu_find_format(&pmu->format, name);
__u64 bits = 0;
int fbit;
if (!format)
return 0;
for_each_set_bit(fbit, format->bits, PERF_PMU_FORMAT_BITS)
bits |= 1ULL << fbit;
return bits;
}
int perf_pmu__format_type(struct perf_pmu *pmu, const char *name)
{
struct perf_pmu_format *format = pmu_find_format(&pmu->format, name);
if (!format)
return -1;
perf_pmu_format__load(pmu, format);
return format->value;
}
/*
* Sets value based on the format definition (format parameter)
* and unformatted value (value parameter).
*/
static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
bool zero)
{
unsigned long fbit, vbit;
for (fbit = 0, vbit = 0; fbit < PERF_PMU_FORMAT_BITS; fbit++) {
if (!test_bit(fbit, format))
continue;
if (value & (1llu << vbit++))
*v |= (1llu << fbit);
else if (zero)
*v &= ~(1llu << fbit);
}
}
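/*
* Worked example for pmu_format_value(): with a sysfs format of
* "config:0-7,32-35" (e.g. the AMD core PMU "event" format), the set format
* bits are 0-7 and 32-35. An unformatted value of 0x1ab then scatters as
* value bits 0-7 (0xab) into config bits 0-7 and value bit 8 (0x1) into
* config bit 32, i.e. *v |= 0x1000000ab. With zero==true, format bits whose
* corresponding value bit is 0 are cleared as well.
*/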
static __u64 pmu_format_max_value(const unsigned long *format)
{
int w;
w = bitmap_weight(format, PERF_PMU_FORMAT_BITS);
if (!w)
return 0;
if (w < 64)
return (1ULL << w) - 1;
return -1;
}
/*
* Term is a string term, and might be a param-term. Try to look up its value
* in the remaining terms.
* - We have a term like "base-or-format-term=param-term",
* - We need to find the value supplied for "param-term" (with param-term named
* in a config string) later on in the term list.
*/
static int pmu_resolve_param_term(struct parse_events_term *term,
struct list_head *head_terms,
__u64 *value)
{
struct parse_events_term *t;
list_for_each_entry(t, head_terms, list) {
if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM &&
t->config && !strcmp(t->config, term->config)) {
t->used = true;
*value = t->val.num;
return 0;
}
}
if (verbose > 0)
printf("Required parameter '%s' not specified\n", term->config);
return -1;
}
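/*
 * Example (illustrative, PMU/term names hypothetical): for an alias that
 * expands to "chip=?" and a user event string "mypmu/myevent,chip=2/",
 * this resolves the '?' for the "chip" term to the value 2.
 */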
static char *pmu_formats_string(struct list_head *formats)
{
struct perf_pmu_format *format;
char *str = NULL;
struct strbuf buf = STRBUF_INIT;
unsigned int i = 0;
if (!formats)
return NULL;
/* sysfs exported terms */
list_for_each_entry(format, formats, list)
if (strbuf_addf(&buf, i++ ? ",%s" : "%s", format->name) < 0)
goto error;
str = strbuf_detach(&buf, NULL);
error:
strbuf_release(&buf);
return str;
}
/*
* Setup one of config[12] attr members based on the
* user input data - term parameter.
*/
static int pmu_config_term(struct perf_pmu *pmu,
struct perf_event_attr *attr,
struct parse_events_term *term,
struct list_head *head_terms,
bool zero, struct parse_events_error *err)
{
struct perf_pmu_format *format;
__u64 *vp;
__u64 val, max_val;
/*
* If this is a parameter we've already used for parameterized-eval,
* skip it in normal eval.
*/
if (term->used)
return 0;
/*
 * Hardcoded terms are already applied, so there is nothing
 * to be done for them.
 */
if (parse_events__is_hardcoded_term(term))
return 0;
format = pmu_find_format(&pmu->format, term->config);
if (!format) {
char *pmu_term = pmu_formats_string(&pmu->format);
char *unknown_term;
char *help_msg;
if (asprintf(&unknown_term,
"unknown term '%s' for pmu '%s'",
term->config, pmu->name) < 0)
unknown_term = NULL;
help_msg = parse_events_formats_error_string(pmu_term);
if (err) {
parse_events_error__handle(err, term->err_term,
unknown_term,
help_msg);
} else {
pr_debug("%s (%s)\n", unknown_term, help_msg);
free(unknown_term);
}
free(pmu_term);
return -EINVAL;
}
perf_pmu_format__load(pmu, format);
switch (format->value) {
case PERF_PMU_FORMAT_VALUE_CONFIG:
vp = &attr->config;
break;
case PERF_PMU_FORMAT_VALUE_CONFIG1:
vp = &attr->config1;
break;
case PERF_PMU_FORMAT_VALUE_CONFIG2:
vp = &attr->config2;
break;
case PERF_PMU_FORMAT_VALUE_CONFIG3:
vp = &attr->config3;
break;
default:
return -EINVAL;
}
/*
* Either directly use a numeric term, or try to translate string terms
* using event parameters.
*/
if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
if (term->no_value &&
bitmap_weight(format->bits, PERF_PMU_FORMAT_BITS) > 1) {
if (err) {
parse_events_error__handle(err, term->err_val,
strdup("no value assigned for term"),
NULL);
}
return -EINVAL;
}
val = term->val.num;
} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
if (strcmp(term->val.str, "?")) {
if (verbose > 0) {
pr_info("Invalid sysfs entry %s=%s\n",
term->config, term->val.str);
}
if (err) {
parse_events_error__handle(err, term->err_val,
strdup("expected numeric value"),
NULL);
}
return -EINVAL;
}
if (pmu_resolve_param_term(term, head_terms, &val))
return -EINVAL;
} else
return -EINVAL;
max_val = pmu_format_max_value(format->bits);
if (val > max_val) {
if (err) {
char *err_str;
parse_events_error__handle(err, term->err_val,
asprintf(&err_str,
"value too big for format, maximum is %llu",
(unsigned long long)max_val) < 0
? strdup("value too big for format")
: err_str,
NULL);
return -EINVAL;
}
/*
* Assume we don't care if !err, in which case the value will be
* silently truncated.
*/
}
pmu_format_value(format->bits, val, vp, zero);
return 0;
}
int perf_pmu__config_terms(struct perf_pmu *pmu,
struct perf_event_attr *attr,
struct list_head *head_terms,
bool zero, struct parse_events_error *err)
{
struct parse_events_term *term;
list_for_each_entry(term, head_terms, list) {
if (pmu_config_term(pmu, attr, term, head_terms, zero, err))
return -EINVAL;
}
return 0;
}
/*
 * Configure the event's 'attr' parameter based on:
 * 1) the user's input - specified in the head_terms parameter
 * 2) the PMU's format definitions - specified by the pmu parameter
 */
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms,
struct parse_events_error *err)
{
bool zero = !!pmu->default_config;
return perf_pmu__config_terms(pmu, attr, head_terms, zero, err);
}
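/*
 * Illustrative flow (not part of the original source): for an event
 * specification such as "mypmu/event=0x3,umask=0x1/" (names hypothetical),
 * perf_pmu__config_terms() matches each term against the PMU's format
 * definitions and pmu_format_value() shifts each value into the right
 * bit field of attr->config, config1, config2 or config3.
 */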
static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
struct parse_events_term *term)
{
struct perf_pmu_alias *alias;
const char *name;
if (parse_events__is_hardcoded_term(term))
return NULL;
if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
if (!term->no_value)
return NULL;
if (pmu_find_format(&pmu->format, term->config))
return NULL;
name = term->config;
} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
if (strcasecmp(term->config, "event"))
return NULL;
name = term->val.str;
} else {
return NULL;
}
alias = perf_pmu__find_alias(pmu, name, /*load=*/ true);
if (alias || pmu->cpu_aliases_added)
return alias;
/* Alias doesn't exist, try to get it from the json events. */
if (pmu->events_table &&
pmu_events_table__find_event(pmu->events_table, pmu, name,
pmu_add_cpu_aliases_map_callback,
pmu) == 0) {
alias = perf_pmu__find_alias(pmu, name, /*load=*/ false);
}
return alias;
}
static int check_info_data(struct perf_pmu *pmu,
struct perf_pmu_alias *alias,
struct perf_pmu_info *info,
struct parse_events_error *err,
int column)
{
read_alias_info(pmu, alias);
/*
 * Only one term in an event definition may set the unit, scale or
 * snapshot; fail if more than one does.
 */
if (info->unit && alias->unit[0]) {
parse_events_error__handle(err, column,
strdup("Attempt to set event's unit twice"),
NULL);
return -EINVAL;
}
if (info->scale && alias->scale) {
parse_events_error__handle(err, column,
strdup("Attempt to set event's scale twice"),
NULL);
return -EINVAL;
}
if (info->snapshot && alias->snapshot) {
parse_events_error__handle(err, column,
strdup("Attempt to set event snapshot twice"),
NULL);
return -EINVAL;
}
if (alias->unit[0])
info->unit = alias->unit;
if (alias->scale)
info->scale = alias->scale;
if (alias->snapshot)
info->snapshot = alias->snapshot;
return 0;
}
/*
* Find alias in the terms list and replace it with the terms
* defined for the alias
*/
int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
struct perf_pmu_info *info, struct parse_events_error *err)
{
struct parse_events_term *term, *h;
struct perf_pmu_alias *alias;
int ret;
info->per_pkg = false;
/*
* Mark unit and scale as not set
* (different from default values, see below)
*/
info->unit = NULL;
info->scale = 0.0;
info->snapshot = false;
list_for_each_entry_safe(term, h, head_terms, list) {
alias = pmu_find_alias(pmu, term);
if (!alias)
continue;
ret = pmu_alias_terms(alias, &term->list);
if (ret) {
parse_events_error__handle(err, term->err_term,
strdup("Failure to duplicate terms"),
NULL);
return ret;
}
ret = check_info_data(pmu, alias, info, err, term->err_term);
if (ret)
return ret;
if (alias->per_pkg)
info->per_pkg = true;
list_del_init(&term->list);
parse_events_term__delete(term);
}
/*
 * If no unit or scale was found in the aliases, set the same
 * defaults as for an evsel; the unit cannot be left NULL.
 */
if (info->unit == NULL)
info->unit = "";
if (info->scale == 0.0)
info->scale = 1.0;
return 0;
}
struct find_event_args {
const char *event;
void *state;
pmu_event_callback cb;
};
static int find_event_callback(void *state, struct pmu_event_info *info)
{
struct find_event_args *args = state;
if (!strcmp(args->event, info->name))
return args->cb(args->state, info);
return 0;
}
int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb)
{
struct find_event_args args = {
.event = event,
.state = state,
.cb = cb,
};
/* Sub-optimal, but function is only used by tests. */
return perf_pmu__for_each_event(pmu, /*skip_duplicate_pmus=*/ false,
&args, find_event_callback);
}
static void perf_pmu__del_formats(struct list_head *formats)
{
struct perf_pmu_format *fmt, *tmp;
list_for_each_entry_safe(fmt, tmp, formats, list) {
list_del(&fmt->list);
zfree(&fmt->name);
free(fmt);
}
}
bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name)
{
struct perf_pmu_format *format;
list_for_each_entry(format, &pmu->format, list) {
if (!strcmp(format->name, name))
return true;
}
return false;
}
bool is_pmu_core(const char *name)
{
return !strcmp(name, "cpu") || !strcmp(name, "cpum_cf") || is_sysfs_pmu_core(name);
}
bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu)
{
return pmu->is_core;
}
bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu)
{
return !pmu->is_core || perf_pmus__num_core_pmus() == 1;
}
bool perf_pmu__have_event(struct perf_pmu *pmu, const char *name)
{
if (perf_pmu__find_alias(pmu, name, /*load=*/ true) != NULL)
return true;
if (pmu->cpu_aliases_added || !pmu->events_table)
return false;
return pmu_events_table__find_event(pmu->events_table, pmu, name, NULL, NULL) == 0;
}
size_t perf_pmu__num_events(struct perf_pmu *pmu)
{
size_t nr;
if (!pmu->sysfs_aliases_loaded)
pmu_aliases_parse(pmu);
nr = pmu->sysfs_aliases;
if (pmu->cpu_aliases_added)
nr += pmu->loaded_json_aliases;
else if (pmu->events_table)
nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->loaded_json_aliases;
return pmu->selectable ? nr + 1 : nr;
}
static int sub_non_neg(int a, int b)
{
if (b > a)
return 0;
return a - b;
}
static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
const struct perf_pmu_alias *alias, bool skip_duplicate_pmus)
{
struct parse_events_term *term;
int pmu_name_len = skip_duplicate_pmus
? pmu_name_len_no_suffix(pmu->name, /*num=*/NULL)
: (int)strlen(pmu->name);
int used = snprintf(buf, len, "%.*s/%s", pmu_name_len, pmu->name, alias->name);
list_for_each_entry(term, &alias->terms, list) {
if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
used += snprintf(buf + used, sub_non_neg(len, used),
",%s=%s", term->config,
term->val.str);
}
if (sub_non_neg(len, used) > 0) {
buf[used] = '/';
used++;
}
if (sub_non_neg(len, used) > 0) {
buf[used] = '\0';
used++;
} else
buf[len - 1] = '\0';
return buf;
}
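/*
 * Example (illustrative, names hypothetical): for PMU "mypmu" and an
 * alias "myevent" carrying a string term "filter=all", this produces
 * "mypmu/myevent,filter=all/"; numeric terms are deliberately omitted.
 */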
int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
void *state, pmu_event_callback cb)
{
char buf[1024];
struct perf_pmu_alias *event;
struct pmu_event_info info = {
.pmu = pmu,
};
int ret = 0;
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
pmu_add_cpu_aliases(pmu);
list_for_each_entry(event, &pmu->aliases, list) {
size_t buf_used;
info.pmu_name = event->pmu_name ?: pmu->name;
info.alias = NULL;
if (event->desc) {
info.name = event->name;
buf_used = 0;
} else {
info.name = format_alias(buf, sizeof(buf), pmu, event,
skip_duplicate_pmus);
if (pmu->is_core) {
info.alias = info.name;
info.name = event->name;
}
buf_used = strlen(buf) + 1;
}
info.scale_unit = NULL;
if (strlen(event->unit) || event->scale != 1.0) {
info.scale_unit = buf + buf_used;
buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
"%G%s", event->scale, event->unit) + 1;
}
info.desc = event->desc;
info.long_desc = event->long_desc;
info.encoding_desc = buf + buf_used;
parse_events_term__to_strbuf(&event->terms, &sb);
buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
"%s/%s/", info.pmu_name, sb.buf) + 1;
info.topic = event->topic;
info.str = sb.buf;
info.deprecated = event->deprecated;
ret = cb(state, &info);
if (ret)
goto out;
strbuf_setlen(&sb, /*len=*/ 0);
}
if (pmu->selectable) {
info.name = buf;
snprintf(buf, sizeof(buf), "%s//", pmu->name);
info.alias = NULL;
info.scale_unit = NULL;
info.desc = NULL;
info.long_desc = NULL;
info.encoding_desc = NULL;
info.topic = NULL;
info.pmu_name = pmu->name;
info.deprecated = false;
ret = cb(state, &info);
}
out:
strbuf_release(&sb);
return ret;
}
bool pmu__name_match(const struct perf_pmu *pmu, const char *pmu_name)
{
return !strcmp(pmu->name, pmu_name) ||
(pmu->is_uncore && pmu_uncore_alias_match(pmu_name, pmu->name)) ||
/*
* jevents and tests use default_core as a marker for any core
* PMU as the PMU name varies across architectures.
*/
(pmu->is_core && !strcmp(pmu_name, "default_core"));
}
bool perf_pmu__is_software(const struct perf_pmu *pmu)
{
if (pmu->is_core || pmu->is_uncore || pmu->auxtrace)
return false;
switch (pmu->type) {
case PERF_TYPE_HARDWARE: return false;
case PERF_TYPE_SOFTWARE: return true;
case PERF_TYPE_TRACEPOINT: return true;
case PERF_TYPE_HW_CACHE: return false;
case PERF_TYPE_RAW: return false;
case PERF_TYPE_BREAKPOINT: return true;
default: break;
}
return !strcmp(pmu->name, "kprobe") || !strcmp(pmu->name, "uprobe");
}
FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
{
char path[PATH_MAX];
if (!perf_pmu__pathname_scnprintf(path, sizeof(path), pmu->name, name) ||
!file_available(path))
return NULL;
return fopen(path, "r");
}
FILE *perf_pmu__open_file_at(struct perf_pmu *pmu, int dirfd, const char *name)
{
int fd;
fd = perf_pmu__pathname_fd(dirfd, pmu->name, name, O_RDONLY);
if (fd < 0)
return NULL;
return fdopen(fd, "r");
}
int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
...)
{
va_list args;
FILE *file;
int ret = EOF;
va_start(args, fmt);
file = perf_pmu__open_file(pmu, name);
if (file) {
ret = vfscanf(file, fmt, args);
fclose(file);
}
va_end(args);
return ret;
}
int perf_pmu__scan_file_at(struct perf_pmu *pmu, int dirfd, const char *name,
const char *fmt, ...)
{
va_list args;
FILE *file;
int ret = EOF;
va_start(args, fmt);
file = perf_pmu__open_file_at(pmu, dirfd, name);
if (file) {
ret = vfscanf(file, fmt, args);
fclose(file);
}
va_end(args);
return ret;
}
bool perf_pmu__file_exists(struct perf_pmu *pmu, const char *name)
{
char path[PATH_MAX];
if (!perf_pmu__pathname_scnprintf(path, sizeof(path), pmu->name, name))
return false;
return file_available(path);
}
static int perf_pmu__new_caps(struct list_head *list, char *name, char *value)
{
struct perf_pmu_caps *caps = zalloc(sizeof(*caps));
if (!caps)
return -ENOMEM;
caps->name = strdup(name);
if (!caps->name)
goto free_caps;
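/* strndup(value, len - 1) drops the trailing newline that fgets() left in value */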
caps->value = strndup(value, strlen(value) - 1);
if (!caps->value)
goto free_name;
list_add_tail(&caps->list, list);
return 0;
free_name:
zfree(&caps->name);
free_caps:
free(caps);
return -ENOMEM;
}
static void perf_pmu__del_caps(struct perf_pmu *pmu)
{
struct perf_pmu_caps *caps, *tmp;
list_for_each_entry_safe(caps, tmp, &pmu->caps, list) {
list_del(&caps->list);
zfree(&caps->name);
zfree(&caps->value);
free(caps);
}
}
/*
 * Read/parse the given PMU's capabilities, which should be located at:
 * /sys/bus/event_source/devices/<dev>/caps as sysfs group attributes.
 * Return the number of capabilities.
 */
int perf_pmu__caps_parse(struct perf_pmu *pmu)
{
struct stat st;
char caps_path[PATH_MAX];
DIR *caps_dir;
struct dirent *evt_ent;
int caps_fd;
if (pmu->caps_initialized)
return pmu->nr_caps;
pmu->nr_caps = 0;
if (!perf_pmu__pathname_scnprintf(caps_path, sizeof(caps_path), pmu->name, "caps"))
return -1;
if (stat(caps_path, &st) < 0) {
pmu->caps_initialized = true;
return 0; /* no error if caps does not exist */
}
caps_dir = opendir(caps_path);
if (!caps_dir)
return -EINVAL;
caps_fd = dirfd(caps_dir);
while ((evt_ent = readdir(caps_dir)) != NULL) {
char *name = evt_ent->d_name;
char value[128];
FILE *file;
int fd;
if (!strcmp(name, ".") || !strcmp(name, ".."))
continue;
fd = openat(caps_fd, name, O_RDONLY);
if (fd == -1)
continue;
file = fdopen(fd, "r");
if (!file) {
close(fd);
continue;
}
if (!fgets(value, sizeof(value), file) ||
(perf_pmu__new_caps(&pmu->caps, name, value) < 0)) {
fclose(file);
continue;
}
pmu->nr_caps++;
fclose(file);
}
closedir(caps_dir);
pmu->caps_initialized = true;
return pmu->nr_caps;
}
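/*
 * Example (illustrative): on x86 a file such as
 * /sys/bus/event_source/devices/cpu/caps/max_precise containing "3" is
 * parsed into a capability with name "max_precise" and value "3".
 */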
static void perf_pmu__compute_config_masks(struct perf_pmu *pmu)
{
struct perf_pmu_format *format;
if (pmu->config_masks_computed)
return;
list_for_each_entry(format, &pmu->format, list) {
unsigned int i;
__u64 *mask;
if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END)
continue;
pmu->config_masks_present = true;
mask = &pmu->config_masks[format->value];
for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS)
*mask |= 1ULL << i;
}
pmu->config_masks_computed = true;
}
void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
const char *name, int config_num,
const char *config_name)
{
__u64 bits;
char buf[100];
perf_pmu__compute_config_masks(pmu);
/*
* Kernel doesn't export any valid format bits.
*/
if (!pmu->config_masks_present)
return;
bits = config & ~pmu->config_masks[config_num];
if (bits == 0)
return;
bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf));
pr_warning("WARNING: event '%s' not valid (bits %s of %s "
"'%llx' not supported by kernel)!\n",
name ?: "N/A", buf, config_name, config);
}
int perf_pmu__match(const char *pattern, const char *name, const char *tok)
{
if (!name)
return -1;
if (fnmatch(pattern, name, 0))
return -1;
if (tok && !perf_pmu__match_ignoring_suffix(name, tok))
return -1;
return 0;
}
double __weak perf_pmu__cpu_slots_per_cycle(void)
{
return NAN;
}
int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size)
{
const char *sysfs = sysfs__mountpoint();
if (!sysfs)
return 0;
return scnprintf(pathname, size, "%s/bus/event_source/devices/", sysfs);
}
int perf_pmu__event_source_devices_fd(void)
{
char path[PATH_MAX];
const char *sysfs = sysfs__mountpoint();
if (!sysfs)
return -1;
scnprintf(path, sizeof(path), "%s/bus/event_source/devices/", sysfs);
return open(path, O_DIRECTORY);
}
/*
* Fill 'buf' with the path to a file or folder in 'pmu_name' in
* sysfs. For example if pmu_name = "cs_etm" and 'filename' = "format"
* then pathname will be filled with
* "/sys/bus/event_source/devices/cs_etm/format"
*
* Return 0 if the sysfs mountpoint couldn't be found, if no characters were
* written or if the buffer size is exceeded.
*/
int perf_pmu__pathname_scnprintf(char *buf, size_t size,
const char *pmu_name, const char *filename)
{
size_t len;
len = perf_pmu__event_source_devices_scnprintf(buf, size);
if (!len || (len + strlen(pmu_name) + strlen(filename) + 1) >= size)
return 0;
return scnprintf(buf + len, size - len, "%s/%s", pmu_name, filename);
}
int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename, int flags)
{
char path[PATH_MAX];
scnprintf(path, sizeof(path), "%s/%s", pmu_name, filename);
return openat(dirfd, path, flags);
}
void perf_pmu__delete(struct perf_pmu *pmu)
{
perf_pmu__del_formats(&pmu->format);
perf_pmu__del_aliases(pmu);
perf_pmu__del_caps(pmu);
perf_cpu_map__put(pmu->cpus);
zfree(&pmu->default_config);
zfree(&pmu->name);
zfree(&pmu->alias_name);
zfree(&pmu->id);
free(pmu);
}
struct perf_pmu *pmu__find_core_pmu(void)
{
struct perf_pmu *pmu = NULL;
while ((pmu = perf_pmus__scan_core(pmu))) {
/*
* The cpumap should cover all CPUs. Otherwise, some CPUs may
* not support some events or have different event IDs.
*/
if (RC_CHK_ACCESS(pmu->cpus)->nr != cpu__max_cpu().cpu)
return NULL;
return pmu;
}
return NULL;
}
| linux-master | tools/perf/util/pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008,2009, Steven Rostedt <[email protected]>
*/
#include <dirent.h>
#include <mntent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <stdbool.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <internal/lib.h> // page_size
#include <sys/param.h>
#include "trace-event.h"
#include "tracepoint.h"
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "debug.h"
#include "util.h"
#define VERSION "0.6"
#define MAX_EVENT_LENGTH 512
static int output_fd;
struct tracepoint_path {
char *system;
char *name;
struct tracepoint_path *next;
};
/* unfortunately, you cannot stat debugfs or proc files for size */
static int record_file(const char *file, ssize_t hdr_sz)
{
unsigned long long size = 0;
char buf[BUFSIZ], *sizep;
off_t hdr_pos = lseek(output_fd, 0, SEEK_CUR);
int r, fd;
int err = -EIO;
fd = open(file, O_RDONLY);
if (fd < 0) {
pr_debug("Can't read '%s'", file);
return -errno;
}
/* put in zeros for file size, then fill true size later */
if (hdr_sz) {
if (write(output_fd, &size, hdr_sz) != hdr_sz)
goto out;
}
do {
r = read(fd, buf, BUFSIZ);
if (r > 0) {
size += r;
if (write(output_fd, buf, r) != r)
goto out;
}
} while (r > 0);
/* ugh, handle a big-endian host when hdr_sz == 4 */
sizep = (char*)&size;
if (host_is_bigendian())
sizep += sizeof(u64) - hdr_sz;
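/*
 * Worked example (illustrative): with hdr_sz == 4 and size == 0x1234, a
 * little-endian host already has the meaningful bytes first, while a
 * big-endian host stores them last, so sizep advances by 4 to point at
 * the low-order half of the u64.
 */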
if (hdr_sz && pwrite(output_fd, sizep, hdr_sz, hdr_pos) < 0) {
pr_debug("writing file size failed\n");
goto out;
}
err = 0;
out:
close(fd);
return err;
}
static int record_header_files(void)
{
char *path = get_events_file("header_page");
struct stat st;
int err = -EIO;
if (!path) {
pr_debug("can't get tracing/events/header_page");
return -ENOMEM;
}
if (stat(path, &st) < 0) {
pr_debug("can't read '%s'", path);
goto out;
}
if (write(output_fd, "header_page", 12) != 12) {
pr_debug("can't write header_page\n");
goto out;
}
if (record_file(path, 8) < 0) {
pr_debug("can't record header_page file\n");
goto out;
}
put_events_file(path);
path = get_events_file("header_event");
if (!path) {
pr_debug("can't get tracing/events/header_event");
err = -ENOMEM;
goto out;
}
if (stat(path, &st) < 0) {
pr_debug("can't read '%s'", path);
goto out;
}
if (write(output_fd, "header_event", 13) != 13) {
pr_debug("can't write header_event\n");
goto out;
}
if (record_file(path, 8) < 0) {
pr_debug("can't record header_event file\n");
goto out;
}
err = 0;
out:
put_events_file(path);
return err;
}
static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
{
while (tps) {
if (!strcmp(sys, tps->name))
return true;
tps = tps->next;
}
return false;
}
#define for_each_event_tps(dir, dent, tps) \
while ((dent = readdir(dir))) \
if (dent->d_type == DT_DIR && \
(strcmp(dent->d_name, ".")) && \
(strcmp(dent->d_name, ".."))) \
static int copy_event_system(const char *sys, struct tracepoint_path *tps)
{
struct dirent *dent;
struct stat st;
char *format;
DIR *dir;
int count = 0;
int ret;
int err;
dir = opendir(sys);
if (!dir) {
pr_debug("can't read directory '%s'", sys);
return -errno;
}
for_each_event_tps(dir, dent, tps) {
if (!name_in_tp_list(dent->d_name, tps))
continue;
if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0) {
err = -ENOMEM;
goto out;
}
ret = stat(format, &st);
free(format);
if (ret < 0)
continue;
count++;
}
if (write(output_fd, &count, 4) != 4) {
err = -EIO;
pr_debug("can't write count\n");
goto out;
}
rewinddir(dir);
for_each_event_tps(dir, dent, tps) {
if (!name_in_tp_list(dent->d_name, tps))
continue;
if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0) {
err = -ENOMEM;
goto out;
}
ret = stat(format, &st);
if (ret >= 0) {
err = record_file(format, 8);
if (err) {
free(format);
goto out;
}
}
free(format);
}
err = 0;
out:
closedir(dir);
return err;
}
static int record_ftrace_files(struct tracepoint_path *tps)
{
char *path;
int ret;
path = get_events_file("ftrace");
if (!path) {
pr_debug("can't get tracing/events/ftrace");
return -ENOMEM;
}
ret = copy_event_system(path, tps);
put_tracing_file(path);
return ret;
}
static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
{
while (tps) {
if (!strcmp(sys, tps->system))
return true;
tps = tps->next;
}
return false;
}
static int record_event_files(struct tracepoint_path *tps)
{
struct dirent *dent;
struct stat st;
char *path;
char *sys;
DIR *dir;
int count = 0;
int ret;
int err;
path = get_tracing_file("events");
if (!path) {
pr_debug("can't get tracing/events");
return -ENOMEM;
}
dir = opendir(path);
if (!dir) {
err = -errno;
pr_debug("can't read directory '%s'", path);
goto out;
}
for_each_event_tps(dir, dent, tps) {
if (strcmp(dent->d_name, "ftrace") == 0 ||
!system_in_tp_list(dent->d_name, tps))
continue;
count++;
}
if (write(output_fd, &count, 4) != 4) {
err = -EIO;
pr_debug("can't write count\n");
goto out;
}
rewinddir(dir);
for_each_event_tps(dir, dent, tps) {
if (strcmp(dent->d_name, "ftrace") == 0 ||
!system_in_tp_list(dent->d_name, tps))
continue;
if (asprintf(&sys, "%s/%s", path, dent->d_name) < 0) {
err = -ENOMEM;
goto out;
}
ret = stat(sys, &st);
if (ret >= 0) {
ssize_t size = strlen(dent->d_name) + 1;
if (write(output_fd, dent->d_name, size) != size ||
copy_event_system(sys, tps) < 0) {
err = -EIO;
free(sys);
goto out;
}
}
free(sys);
}
err = 0;
out:
closedir(dir);
put_tracing_file(path);
return err;
}
static int record_proc_kallsyms(void)
{
unsigned long long size = 0;
/*
* Just to keep older perf.data file parsers happy, record a zero
* sized kallsyms file, i.e. do the same thing that was done when
* /proc/kallsyms (or something specified via --kallsyms, in a
* different path) couldn't be read.
*/
return write(output_fd, &size, 4) != 4 ? -EIO : 0;
}
static int record_ftrace_printk(void)
{
unsigned int size;
char *path;
struct stat st;
int ret, err = 0;
path = get_tracing_file("printk_formats");
if (!path) {
pr_debug("can't get tracing/printk_formats");
return -ENOMEM;
}
ret = stat(path, &st);
if (ret < 0) {
/* not found */
size = 0;
if (write(output_fd, &size, 4) != 4)
err = -EIO;
goto out;
}
err = record_file(path, 4);
out:
put_tracing_file(path);
return err;
}
static int record_saved_cmdline(void)
{
unsigned long long size;
char *path;
struct stat st;
int ret, err = 0;
path = get_tracing_file("saved_cmdlines");
if (!path) {
pr_debug("can't get tracing/saved_cmdline");
return -ENOMEM;
}
ret = stat(path, &st);
if (ret < 0) {
/* not found */
size = 0;
if (write(output_fd, &size, 8) != 8)
err = -EIO;
goto out;
}
err = record_file(path, 8);
out:
put_tracing_file(path);
return err;
}
static void
put_tracepoints_path(struct tracepoint_path *tps)
{
while (tps) {
struct tracepoint_path *t = tps;
tps = tps->next;
zfree(&t->name);
zfree(&t->system);
free(t);
}
}
static struct tracepoint_path *tracepoint_id_to_path(u64 config)
{
struct tracepoint_path *path = NULL;
DIR *sys_dir, *evt_dir;
struct dirent *sys_dirent, *evt_dirent;
char id_buf[24];
int fd;
u64 id;
char evt_path[MAXPATHLEN];
char *dir_path;
sys_dir = tracing_events__opendir();
if (!sys_dir)
return NULL;
for_each_subsystem(sys_dir, sys_dirent) {
dir_path = get_events_file(sys_dirent->d_name);
if (!dir_path)
continue;
evt_dir = opendir(dir_path);
if (!evt_dir)
goto next;
for_each_event(dir_path, evt_dir, evt_dirent) {
scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
evt_dirent->d_name);
fd = open(evt_path, O_RDONLY);
if (fd < 0)
continue;
if (read(fd, id_buf, sizeof(id_buf)) < 0) {
close(fd);
continue;
}
close(fd);
id = atoll(id_buf);
if (id == config) {
put_events_file(dir_path);
closedir(evt_dir);
closedir(sys_dir);
path = zalloc(sizeof(*path));
if (!path)
return NULL;
if (asprintf(&path->system, "%.*s",
MAX_EVENT_LENGTH, sys_dirent->d_name) < 0) {
free(path);
return NULL;
}
if (asprintf(&path->name, "%.*s",
MAX_EVENT_LENGTH, evt_dirent->d_name) < 0) {
zfree(&path->system);
free(path);
return NULL;
}
return path;
}
}
closedir(evt_dir);
next:
put_events_file(dir_path);
}
closedir(sys_dir);
return NULL;
}
char *tracepoint_id_to_name(u64 config)
{
struct tracepoint_path *path = tracepoint_id_to_path(config);
char *buf = NULL;
if (path && asprintf(&buf, "%s:%s", path->system, path->name) < 0)
buf = NULL;
put_tracepoints_path(path);
return buf;
}
static struct tracepoint_path *tracepoint_name_to_path(const char *name)
{
struct tracepoint_path *path = zalloc(sizeof(*path));
char *str = strchr(name, ':');
if (path == NULL || str == NULL) {
free(path);
return NULL;
}
path->system = strndup(name, str - name);
path->name = strdup(str+1);
if (path->system == NULL || path->name == NULL) {
zfree(&path->system);
zfree(&path->name);
zfree(&path);
}
return path;
}
static struct tracepoint_path *
get_tracepoints_path(struct list_head *pattrs)
{
struct tracepoint_path path, *ppath = &path;
struct evsel *pos;
int nr_tracepoints = 0;
list_for_each_entry(pos, pattrs, core.node) {
if (pos->core.attr.type != PERF_TYPE_TRACEPOINT)
continue;
++nr_tracepoints;
if (pos->name) {
ppath->next = tracepoint_name_to_path(pos->name);
if (ppath->next)
goto next;
if (strchr(pos->name, ':') == NULL)
goto try_id;
goto error;
}
try_id:
ppath->next = tracepoint_id_to_path(pos->core.attr.config);
if (!ppath->next) {
error:
pr_debug("No memory to alloc tracepoints list\n");
put_tracepoints_path(path.next);
return NULL;
}
next:
ppath = ppath->next;
}
return nr_tracepoints > 0 ? path.next : NULL;
}
bool have_tracepoints(struct list_head *pattrs)
{
struct evsel *pos;
list_for_each_entry(pos, pattrs, core.node)
if (pos->core.attr.type == PERF_TYPE_TRACEPOINT)
return true;
return false;
}
static int tracing_data_header(void)
{
char buf[20];
ssize_t size;
/* just guessing this is someone's birthday.. ;) */
buf[0] = 23;
buf[1] = 8;
buf[2] = 68;
memcpy(buf + 3, "tracing", 7);
if (write(output_fd, buf, 10) != 10)
return -1;
size = strlen(VERSION) + 1;
if (write(output_fd, VERSION, size) != size)
return -1;
/* save endian */
if (host_is_bigendian())
buf[0] = 1;
else
buf[0] = 0;
if (write(output_fd, buf, 1) != 1)
return -1;
/* save size of long */
buf[0] = sizeof(long);
if (write(output_fd, buf, 1) != 1)
return -1;
/* save page_size */
if (write(output_fd, &page_size, 4) != 4)
return -1;
return 0;
}
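/*
 * Resulting header layout (illustrative summary, not part of the
 * original source):
 *   bytes 0-2: magic { 23, 8, 68 }
 *   bytes 3-9: "tracing"
 *   then: NUL-terminated version string ("0.6")
 *   then: 1 byte of endianness (0 = little, 1 = big)
 *   then: 1 byte holding sizeof(long)
 *   then: 4 bytes of page_size
 */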
struct tracing_data *tracing_data_get(struct list_head *pattrs,
int fd, bool temp)
{
struct tracepoint_path *tps;
struct tracing_data *tdata;
int err;
output_fd = fd;
tps = get_tracepoints_path(pattrs);
if (!tps)
return NULL;
tdata = malloc(sizeof(*tdata));
if (!tdata)
return NULL;
tdata->temp = temp;
tdata->size = 0;
if (temp) {
int temp_fd;
snprintf(tdata->temp_file, sizeof(tdata->temp_file),
"/tmp/perf-XXXXXX");
/* mkstemp() returns a valid fd (possibly 0) on success and -1 on failure */
if (mkstemp(tdata->temp_file) < 0) {
pr_debug("Can't make temp file");
free(tdata);
return NULL;
}
temp_fd = open(tdata->temp_file, O_RDWR);
if (temp_fd < 0) {
pr_debug("Can't read '%s'", tdata->temp_file);
free(tdata);
return NULL;
}
/*
 * Make the temp file the default output, so all the
 * tracing data is stored in it.
 */
output_fd = temp_fd;
}
err = tracing_data_header();
if (err)
goto out;
err = record_header_files();
if (err)
goto out;
err = record_ftrace_files(tps);
if (err)
goto out;
err = record_event_files(tps);
if (err)
goto out;
err = record_proc_kallsyms();
if (err)
goto out;
err = record_ftrace_printk();
if (err)
goto out;
err = record_saved_cmdline();
out:
/*
 * All tracing data has been stored by now; restore the
 * default output file in case we used a temp file.
 */
if (temp) {
tdata->size = lseek(output_fd, 0, SEEK_CUR);
close(output_fd);
output_fd = fd;
}
if (err)
zfree(&tdata);
put_tracepoints_path(tps);
return tdata;
}
int tracing_data_put(struct tracing_data *tdata)
{
int err = 0;
if (tdata->temp) {
err = record_file(tdata->temp_file, 0);
unlink(tdata->temp_file);
}
free(tdata);
return err;
}
int read_tracing_data(int fd, struct list_head *pattrs)
{
int err;
struct tracing_data *tdata;
/*
* We work over the real file, so we can write data
* directly, no temp file is needed.
*/
tdata = tracing_data_get(pattrs, fd, false);
if (!tdata)
return -ENOMEM;
err = tracing_data_put(tdata);
return err;
}
| linux-master | tools/perf/util/trace-event-info.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "evsel.h"
#include "counts.h"
#include <perf/threadmap.h>
#include <linux/zalloc.h>
struct perf_counts *perf_counts__new(int ncpus, int nthreads)
{
struct perf_counts *counts = zalloc(sizeof(*counts));
if (counts) {
struct xyarray *values;
values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values));
if (!values) {
free(counts);
return NULL;
}
counts->values = values;
values = xyarray__new(ncpus, nthreads, sizeof(bool));
if (!values) {
xyarray__delete(counts->values);
free(counts);
return NULL;
}
counts->loaded = values;
}
return counts;
}
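/*
 * Usage sketch (illustrative): callers typically size the matrix from an
 * evsel, e.g. perf_counts__new(perf_cpu_map__nr(cpus), nthreads), giving
 * one perf_counts_values cell and one 'loaded' flag per (cpu, thread).
 */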
void perf_counts__delete(struct perf_counts *counts)
{
if (counts) {
xyarray__delete(counts->loaded);
xyarray__delete(counts->values);
free(counts);
}
}
void perf_counts__reset(struct perf_counts *counts)
{
xyarray__reset(counts->loaded);
xyarray__reset(counts->values);
}
void evsel__reset_counts(struct evsel *evsel)
{
perf_counts__reset(evsel->counts);
}
int evsel__alloc_counts(struct evsel *evsel)
{
struct perf_cpu_map *cpus = evsel__cpus(evsel);
int nthreads = perf_thread_map__nr(evsel->core.threads);
evsel->counts = perf_counts__new(perf_cpu_map__nr(cpus), nthreads);
return evsel->counts != NULL ? 0 : -ENOMEM;
}
void evsel__free_counts(struct evsel *evsel)
{
perf_counts__delete(evsel->counts);
evsel->counts = NULL;
}
| linux-master | tools/perf/util/counts.c |
// SPDX-License-Identifier: GPL-2.0
#include <byteswap.h>
#include "memswap.h"
#include <linux/types.h>
void mem_bswap_32(void *src, int byte_size)
{
u32 *m = src;
while (byte_size > 0) {
*m = bswap_32(*m);
byte_size -= sizeof(u32);
++m;
}
}
void mem_bswap_64(void *src, int byte_size)
{
u64 *m = src;
while (byte_size > 0) {
*m = bswap_64(*m);
byte_size -= sizeof(u64);
++m;
}
}
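/*
 * Example (illustrative): mem_bswap_64() over a single u64 holding
 * 0x0102030405060708 yields 0x0807060504030201. Both helpers assume
 * byte_size is a multiple of the element size.
 */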
| linux-master | tools/perf/util/memswap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Arm Statistical Profiling Extensions (SPE) support
* Copyright (c) 2017-2018, Arm Ltd.
*/
#include <byteswap.h>
#include <endian.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <unistd.h>
#include "auxtrace.h"
#include "color.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "session.h"
#include "symbol.h"
#include "thread.h"
#include "thread-stack.h"
#include "tsc.h"
#include "tool.h"
#include "util/synthetic-events.h"
#include "arm-spe.h"
#include "arm-spe-decoder/arm-spe-decoder.h"
#include "arm-spe-decoder/arm-spe-pkt-decoder.h"
#include "../../arch/arm64/include/asm/cputype.h"
#define MAX_TIMESTAMP (~0ULL)
struct arm_spe {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
struct auxtrace_heap heap;
struct itrace_synth_opts synth_opts;
u32 auxtrace_type;
struct perf_session *session;
struct machine *machine;
u32 pmu_type;
u64 midr;
struct perf_tsc_conversion tc;
u8 timeless_decoding;
u8 data_queued;
u64 sample_type;
u8 sample_flc;
u8 sample_llc;
u8 sample_tlb;
u8 sample_branch;
u8 sample_remote_access;
u8 sample_memory;
u8 sample_instructions;
u64 instructions_sample_period;
u64 l1d_miss_id;
u64 l1d_access_id;
u64 llc_miss_id;
u64 llc_access_id;
u64 tlb_miss_id;
u64 tlb_access_id;
u64 branch_miss_id;
u64 remote_access_id;
u64 memory_id;
u64 instructions_id;
u64 kernel_start;
unsigned long num_events;
u8 use_ctx_pkt_for_pid;
};
struct arm_spe_queue {
struct arm_spe *spe;
unsigned int queue_nr;
struct auxtrace_buffer *buffer;
struct auxtrace_buffer *old_buffer;
union perf_event *event_buf;
bool on_heap;
bool done;
pid_t pid;
pid_t tid;
int cpu;
struct arm_spe_decoder *decoder;
u64 time;
u64 timestamp;
struct thread *thread;
u64 period_instructions;
};
static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
unsigned char *buf, size_t len)
{
struct arm_spe_pkt packet;
size_t pos = 0;
int ret, pkt_len, i;
char desc[ARM_SPE_PKT_DESC_MAX];
const char *color = PERF_COLOR_BLUE;
color_fprintf(stdout, color,
". ... ARM SPE data: size %#zx bytes\n",
len);
while (len) {
ret = arm_spe_get_packet(buf, len, &packet);
if (ret > 0)
pkt_len = ret;
else
pkt_len = 1;
printf(".");
color_fprintf(stdout, color, " %08x: ", pos);
for (i = 0; i < pkt_len; i++)
color_fprintf(stdout, color, " %02x", buf[i]);
for (; i < 16; i++)
color_fprintf(stdout, color, " ");
if (ret > 0) {
ret = arm_spe_pkt_desc(&packet, desc,
ARM_SPE_PKT_DESC_MAX);
if (!ret)
color_fprintf(stdout, color, " %s\n", desc);
} else {
color_fprintf(stdout, color, " Bad packet!\n");
}
pos += pkt_len;
buf += pkt_len;
len -= pkt_len;
}
}
static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
size_t len)
{
printf(".\n");
arm_spe_dump(spe, buf, len);
}
static int arm_spe_get_trace(struct arm_spe_buffer *b, void *data)
{
struct arm_spe_queue *speq = data;
struct auxtrace_buffer *buffer = speq->buffer;
struct auxtrace_buffer *old_buffer = speq->old_buffer;
struct auxtrace_queue *queue;
queue = &speq->spe->queues.queue_array[speq->queue_nr];
buffer = auxtrace_buffer__next(queue, buffer);
/* If no more data, drop the previous auxtrace_buffer and return */
if (!buffer) {
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
b->len = 0;
return 0;
}
speq->buffer = buffer;
/* If the aux_buffer doesn't have data associated, try to load it */
if (!buffer->data) {
/* get the file desc associated with the perf data file */
int fd = perf_data__fd(speq->spe->session->data);
buffer->data = auxtrace_buffer__get_data(buffer, fd);
if (!buffer->data)
return -ENOMEM;
}
b->len = buffer->size;
b->buf = buffer->data;
if (b->len) {
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
speq->old_buffer = buffer;
} else {
auxtrace_buffer__drop_data(buffer);
return arm_spe_get_trace(b, data);
}
return 0;
}
static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
unsigned int queue_nr)
{
struct arm_spe_params params = { .get_trace = 0, };
struct arm_spe_queue *speq;
speq = zalloc(sizeof(*speq));
if (!speq)
return NULL;
speq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
if (!speq->event_buf)
goto out_free;
speq->spe = spe;
speq->queue_nr = queue_nr;
speq->pid = -1;
speq->tid = -1;
speq->cpu = -1;
speq->period_instructions = 0;
/* params set */
params.get_trace = arm_spe_get_trace;
params.data = speq;
/* create new decoder */
speq->decoder = arm_spe_decoder_new(¶ms);
if (!speq->decoder)
goto out_free;
return speq;
out_free:
zfree(&speq->event_buf);
free(speq);
return NULL;
}
static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip)
{
return ip >= spe->kernel_start ?
PERF_RECORD_MISC_KERNEL :
PERF_RECORD_MISC_USER;
}
static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
struct auxtrace_queue *queue)
{
struct arm_spe_queue *speq = queue->priv;
pid_t tid;
tid = machine__get_current_tid(spe->machine, speq->cpu);
if (tid != -1) {
speq->tid = tid;
thread__zput(speq->thread);
} else
speq->tid = queue->tid;
if ((!speq->thread) && (speq->tid != -1)) {
speq->thread = machine__find_thread(spe->machine, -1,
speq->tid);
}
if (speq->thread) {
speq->pid = thread__pid(speq->thread);
if (queue->cpu == -1)
speq->cpu = thread__cpu(speq->thread);
}
}
static int arm_spe_set_tid(struct arm_spe_queue *speq, pid_t tid)
{
struct arm_spe *spe = speq->spe;
int err = machine__set_current_tid(spe->machine, speq->cpu, -1, tid);
if (err)
return err;
arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);
return 0;
}
static struct simd_flags arm_spe__synth_simd_flags(const struct arm_spe_record *record)
{
struct simd_flags simd_flags = {};
if ((record->op & ARM_SPE_OP_LDST) && (record->op & ARM_SPE_OP_SVE_LDST))
simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
if ((record->op & ARM_SPE_OP_OTHER) && (record->op & ARM_SPE_OP_SVE_OTHER))
simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
if (record->type & ARM_SPE_SVE_PARTIAL_PRED)
simd_flags.pred |= SIMD_OP_FLAGS_PRED_PARTIAL;
if (record->type & ARM_SPE_SVE_EMPTY_PRED)
simd_flags.pred |= SIMD_OP_FLAGS_PRED_EMPTY;
return simd_flags;
}
static void arm_spe_prep_sample(struct arm_spe *spe,
struct arm_spe_queue *speq,
union perf_event *event,
struct perf_sample *sample)
{
struct arm_spe_record *record = &speq->decoder->record;
if (!spe->timeless_decoding)
sample->time = tsc_to_perf_time(record->timestamp, &spe->tc);
sample->ip = record->from_ip;
sample->cpumode = arm_spe_cpumode(spe, sample->ip);
sample->pid = speq->pid;
sample->tid = speq->tid;
sample->period = 1;
sample->cpu = speq->cpu;
sample->simd_flags = arm_spe__synth_simd_flags(record);
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = sample->cpumode;
event->sample.header.size = sizeof(struct perf_event_header);
}
static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type)
{
event->header.size = perf_event__sample_event_size(sample, type, 0);
return perf_event__synthesize_sample(event, type, 0, sample);
}
static inline int
arm_spe_deliver_synth_event(struct arm_spe *spe,
struct arm_spe_queue *speq __maybe_unused,
union perf_event *event,
struct perf_sample *sample)
{
int ret;
if (spe->synth_opts.inject) {
ret = arm_spe__inject_event(event, sample, spe->sample_type);
if (ret)
return ret;
}
ret = perf_session__deliver_synth_event(spe->session, event, sample);
if (ret)
pr_err("ARM SPE: failed to deliver event, error %d\n", ret);
return ret;
}
static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
u64 spe_events_id, u64 data_src)
{
struct arm_spe *spe = speq->spe;
struct arm_spe_record *record = &speq->decoder->record;
union perf_event *event = speq->event_buf;
struct perf_sample sample = { .ip = 0, };
arm_spe_prep_sample(spe, speq, event, &sample);
sample.id = spe_events_id;
sample.stream_id = spe_events_id;
sample.addr = record->virt_addr;
sample.phys_addr = record->phys_addr;
sample.data_src = data_src;
sample.weight = record->latency;
return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}
static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
u64 spe_events_id)
{
struct arm_spe *spe = speq->spe;
struct arm_spe_record *record = &speq->decoder->record;
union perf_event *event = speq->event_buf;
struct perf_sample sample = { .ip = 0, };
arm_spe_prep_sample(spe, speq, event, &sample);
sample.id = spe_events_id;
sample.stream_id = spe_events_id;
sample.addr = record->to_ip;
sample.weight = record->latency;
return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}
static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
u64 spe_events_id, u64 data_src)
{
struct arm_spe *spe = speq->spe;
struct arm_spe_record *record = &speq->decoder->record;
union perf_event *event = speq->event_buf;
struct perf_sample sample = { .ip = 0, };
/*
* Handles perf instruction sampling period.
*/
speq->period_instructions++;
if (speq->period_instructions < spe->instructions_sample_period)
return 0;
speq->period_instructions = 0;
arm_spe_prep_sample(spe, speq, event, &sample);
sample.id = spe_events_id;
sample.stream_id = spe_events_id;
sample.addr = record->virt_addr;
sample.phys_addr = record->phys_addr;
sample.data_src = data_src;
sample.period = spe->instructions_sample_period;
sample.weight = record->latency;
return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}
static const struct midr_range neoverse_spe[] = {
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
{},
};
static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
union perf_mem_data_src *data_src)
{
/*
 * Even though four levels of cache hierarchy are possible, no known
 * production Neoverse systems currently include more than three levels,
 * so for the time being we assume three exist. If a production system
 * is built with four, then this function will have to be changed to
 * detect the number of levels for reporting.
 */
/*
* We have no data on the hit level or data source for stores in the
* Neoverse SPE records.
*/
if (record->op & ARM_SPE_OP_ST) {
data_src->mem_lvl = PERF_MEM_LVL_NA;
data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
data_src->mem_snoop = PERF_MEM_SNOOP_NA;
return;
}
switch (record->source) {
case ARM_SPE_NV_L1D:
data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
break;
case ARM_SPE_NV_L2:
data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
break;
case ARM_SPE_NV_PEER_CORE:
data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
break;
/*
 * We don't know if this was L1 or L2, but we do know it was a
 * cache-to-cache transfer, so set SNOOPX_PEER.
 */
case ARM_SPE_NV_LOCAL_CLUSTER:
case ARM_SPE_NV_PEER_CLUSTER:
data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
break;
/*
* System cache is assumed to be L3
*/
case ARM_SPE_NV_SYS_CACHE:
data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
break;
/*
 * We don't know what level it hit in, only that it came from the
 * other socket.
 */
case ARM_SPE_NV_REMOTE:
data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
break;
case ARM_SPE_NV_DRAM:
data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
break;
default:
break;
}
}
static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
union perf_mem_data_src *data_src)
{
if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
data_src->mem_lvl = PERF_MEM_LVL_L3;
if (record->type & ARM_SPE_LLC_MISS)
data_src->mem_lvl |= PERF_MEM_LVL_MISS;
else
data_src->mem_lvl |= PERF_MEM_LVL_HIT;
} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
data_src->mem_lvl = PERF_MEM_LVL_L1;
if (record->type & ARM_SPE_L1D_MISS)
data_src->mem_lvl |= PERF_MEM_LVL_MISS;
else
data_src->mem_lvl |= PERF_MEM_LVL_HIT;
}
if (record->type & ARM_SPE_REMOTE_ACCESS)
data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
}
static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
{
union perf_mem_data_src data_src = { .mem_op = PERF_MEM_OP_NA };
bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);
if (record->op & ARM_SPE_OP_LD)
data_src.mem_op = PERF_MEM_OP_LOAD;
else if (record->op & ARM_SPE_OP_ST)
data_src.mem_op = PERF_MEM_OP_STORE;
else
return 0;
if (is_neoverse)
arm_spe__synth_data_source_neoverse(record, &data_src);
else
arm_spe__synth_data_source_generic(record, &data_src);
if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
data_src.mem_dtlb = PERF_MEM_TLB_WK;
if (record->type & ARM_SPE_TLB_MISS)
data_src.mem_dtlb |= PERF_MEM_TLB_MISS;
else
data_src.mem_dtlb |= PERF_MEM_TLB_HIT;
}
return data_src.val;
}
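/*
 * Example (illustrative): on a Neoverse core, a load that hit in L2 is
 * encoded as mem_op = LOAD with mem_lvl = L2|HIT and mem_lvl_num = L2,
 * while a store carries no source information and is reported as N/A.
 */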
static int arm_spe_sample(struct arm_spe_queue *speq)
{
const struct arm_spe_record *record = &speq->decoder->record;
struct arm_spe *spe = speq->spe;
u64 data_src;
int err;
data_src = arm_spe__synth_data_source(record, spe->midr);
if (spe->sample_flc) {
if (record->type & ARM_SPE_L1D_MISS) {
err = arm_spe__synth_mem_sample(speq, spe->l1d_miss_id,
data_src);
if (err)
return err;
}
if (record->type & ARM_SPE_L1D_ACCESS) {
err = arm_spe__synth_mem_sample(speq, spe->l1d_access_id,
data_src);
if (err)
return err;
}
}
if (spe->sample_llc) {
if (record->type & ARM_SPE_LLC_MISS) {
err = arm_spe__synth_mem_sample(speq, spe->llc_miss_id,
data_src);
if (err)
return err;
}
if (record->type & ARM_SPE_LLC_ACCESS) {
err = arm_spe__synth_mem_sample(speq, spe->llc_access_id,
data_src);
if (err)
return err;
}
}
if (spe->sample_tlb) {
if (record->type & ARM_SPE_TLB_MISS) {
err = arm_spe__synth_mem_sample(speq, spe->tlb_miss_id,
data_src);
if (err)
return err;
}
if (record->type & ARM_SPE_TLB_ACCESS) {
err = arm_spe__synth_mem_sample(speq, spe->tlb_access_id,
data_src);
if (err)
return err;
}
}
if (spe->sample_branch && (record->type & ARM_SPE_BRANCH_MISS)) {
err = arm_spe__synth_branch_sample(speq, spe->branch_miss_id);
if (err)
return err;
}
if (spe->sample_remote_access &&
(record->type & ARM_SPE_REMOTE_ACCESS)) {
err = arm_spe__synth_mem_sample(speq, spe->remote_access_id,
data_src);
if (err)
return err;
}
/*
 * When data_src is zero, the record is not a memory operation;
 * skip synthesizing a memory sample in this case.
 */
if (spe->sample_memory && data_src) {
err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
if (err)
return err;
}
if (spe->sample_instructions) {
err = arm_spe__synth_instruction_sample(speq, spe->instructions_id, data_src);
if (err)
return err;
}
return 0;
}
static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
{
struct arm_spe *spe = speq->spe;
struct arm_spe_record *record;
int ret;
if (!spe->kernel_start)
spe->kernel_start = machine__kernel_start(spe->machine);
while (1) {
/*
 * The usual logic is first to decode the packets, and then
 * synthesize a sample based on the record; but here the flow is
 * reversed: arm_spe_sample() is called to synthesize samples
 * prior to arm_spe_decode().
 *
 * Two reasons for this code logic:
 * 1. When the queue is set up in arm_spe__setup_queue(), it has
 * already decoded trace data and generated a record, but the record
 * is left to generate a sample until the flow reaches here, so it
 * is correct to synthesize a sample for that pending record.
 * 2. After decoding trace data, the record timestamp needs to be
 * compared with the timestamp of the coming perf event; if the
 * record timestamp is later, bail out and push the record onto the
 * auxtrace heap, so that synthesizing its sample is deferred until
 * the flow reaches here the next time. This correlates samples
 * between Arm SPE trace data and other perf events with correct
 * time ordering.
 */
/*
* Update pid/tid info.
*/
record = &speq->decoder->record;
if (!spe->timeless_decoding && record->context_id != (u64)-1) {
ret = arm_spe_set_tid(speq, record->context_id);
if (ret)
return ret;
spe->use_ctx_pkt_for_pid = true;
}
ret = arm_spe_sample(speq);
if (ret)
return ret;
ret = arm_spe_decode(speq->decoder);
if (!ret) {
pr_debug("No data or all data has been processed.\n");
return 1;
}
/*
 * If an error is detected while decoding the SPE trace data,
 * continue with the next chunk of trace data to find more records.
 */
if (ret < 0)
continue;
record = &speq->decoder->record;
/* Update timestamp for the last record */
if (record->timestamp > speq->timestamp)
speq->timestamp = record->timestamp;
/*
* If the timestamp of the queue is later than timestamp of the
* coming perf event, bail out so can allow the perf event to
* be processed ahead.
*/
if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
*timestamp = speq->timestamp;
return 0;
}
}
return 0;
}
static int arm_spe__setup_queue(struct arm_spe *spe,
struct auxtrace_queue *queue,
unsigned int queue_nr)
{
struct arm_spe_queue *speq = queue->priv;
struct arm_spe_record *record;
if (list_empty(&queue->head) || speq)
return 0;
speq = arm_spe__alloc_queue(spe, queue_nr);
if (!speq)
return -ENOMEM;
queue->priv = speq;
if (queue->cpu != -1)
speq->cpu = queue->cpu;
if (!speq->on_heap) {
int ret;
if (spe->timeless_decoding)
return 0;
retry:
ret = arm_spe_decode(speq->decoder);
if (!ret)
return 0;
if (ret < 0)
goto retry;
record = &speq->decoder->record;
speq->timestamp = record->timestamp;
ret = auxtrace_heap__add(&spe->heap, queue_nr, speq->timestamp);
if (ret)
return ret;
speq->on_heap = true;
}
return 0;
}
static int arm_spe__setup_queues(struct arm_spe *spe)
{
unsigned int i;
int ret;
for (i = 0; i < spe->queues.nr_queues; i++) {
ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);
if (ret)
return ret;
}
return 0;
}
static int arm_spe__update_queues(struct arm_spe *spe)
{
if (spe->queues.new_data) {
spe->queues.new_data = false;
return arm_spe__setup_queues(spe);
}
return 0;
}
static bool arm_spe__is_timeless_decoding(struct arm_spe *spe)
{
struct evsel *evsel;
struct evlist *evlist = spe->session->evlist;
bool timeless_decoding = true;
/*
 * Cycle through the list of events and, if we find one with the
 * time bit set, disable timeless decoding.
 */
evlist__for_each_entry(evlist, evsel) {
if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
timeless_decoding = false;
}
return timeless_decoding;
}
static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
{
unsigned int queue_nr;
u64 ts;
int ret;
while (1) {
struct auxtrace_queue *queue;
struct arm_spe_queue *speq;
if (!spe->heap.heap_cnt)
return 0;
if (spe->heap.heap_array[0].ordinal >= timestamp)
return 0;
queue_nr = spe->heap.heap_array[0].queue_nr;
queue = &spe->queues.queue_array[queue_nr];
speq = queue->priv;
auxtrace_heap__pop(&spe->heap);
if (spe->heap.heap_cnt) {
ts = spe->heap.heap_array[0].ordinal + 1;
if (ts > timestamp)
ts = timestamp;
} else {
ts = timestamp;
}
/*
* A previous context-switch event has set pid/tid in the machine's context, so
* here we need to update the pid/tid in the thread and SPE queue.
*/
if (!spe->use_ctx_pkt_for_pid)
arm_spe_set_pid_tid_cpu(spe, queue);
ret = arm_spe_run_decoder(speq, &ts);
if (ret < 0) {
auxtrace_heap__add(&spe->heap, queue_nr, ts);
return ret;
}
if (!ret) {
ret = auxtrace_heap__add(&spe->heap, queue_nr, ts);
if (ret < 0)
return ret;
} else {
speq->on_heap = false;
}
}
return 0;
}
static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid,
u64 time_)
{
struct auxtrace_queues *queues = &spe->queues;
unsigned int i;
u64 ts = 0;
for (i = 0; i < queues->nr_queues; i++) {
struct auxtrace_queue *queue = &spe->queues.queue_array[i];
struct arm_spe_queue *speq = queue->priv;
if (speq && (tid == -1 || speq->tid == tid)) {
speq->time = time_;
arm_spe_set_pid_tid_cpu(spe, queue);
arm_spe_run_decoder(speq, &ts);
}
}
return 0;
}
static int arm_spe_context_switch(struct arm_spe *spe, union perf_event *event,
struct perf_sample *sample)
{
pid_t pid, tid;
int cpu;
if (!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT))
return 0;
pid = event->context_switch.next_prev_pid;
tid = event->context_switch.next_prev_tid;
cpu = sample->cpu;
if (tid == -1)
pr_warning("context_switch event has no tid\n");
return machine__set_current_tid(spe->machine, cpu, pid, tid);
}
static int arm_spe_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
{
int err = 0;
u64 timestamp;
struct arm_spe *spe = container_of(session->auxtrace,
struct arm_spe, auxtrace);
if (dump_trace)
return 0;
if (!tool->ordered_events) {
pr_err("SPE trace requires ordered events\n");
return -EINVAL;
}
if (sample->time && (sample->time != (u64) -1))
timestamp = perf_time_to_tsc(sample->time, &spe->tc);
else
timestamp = 0;
if (timestamp || spe->timeless_decoding) {
err = arm_spe__update_queues(spe);
if (err)
return err;
}
if (spe->timeless_decoding) {
if (event->header.type == PERF_RECORD_EXIT) {
err = arm_spe_process_timeless_queues(spe,
event->fork.tid,
sample->time);
}
} else if (timestamp) {
err = arm_spe_process_queues(spe, timestamp);
if (err)
return err;
if (!spe->use_ctx_pkt_for_pid &&
(event->header.type == PERF_RECORD_SWITCH_CPU_WIDE ||
event->header.type == PERF_RECORD_SWITCH))
err = arm_spe_context_switch(spe, event, sample);
}
return err;
}
static int arm_spe_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool __maybe_unused)
{
struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
auxtrace);
if (!spe->data_queued) {
struct auxtrace_buffer *buffer;
off_t data_offset;
int fd = perf_data__fd(session->data);
int err;
if (perf_data__is_pipe(session->data)) {
data_offset = 0;
} else {
data_offset = lseek(fd, 0, SEEK_CUR);
if (data_offset == -1)
return -errno;
}
err = auxtrace_queues__add_event(&spe->queues, session, event,
data_offset, &buffer);
if (err)
return err;
/* Dump here, now that we have copied a piped trace out of the pipe */
if (dump_trace) {
if (auxtrace_buffer__get_data(buffer, fd)) {
arm_spe_dump_event(spe, buffer->data,
buffer->size);
auxtrace_buffer__put_data(buffer);
}
}
}
return 0;
}
static int arm_spe_flush(struct perf_session *session __maybe_unused,
struct perf_tool *tool __maybe_unused)
{
struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
auxtrace);
int ret;
if (dump_trace)
return 0;
if (!tool->ordered_events)
return -EINVAL;
ret = arm_spe__update_queues(spe);
if (ret < 0)
return ret;
if (spe->timeless_decoding)
return arm_spe_process_timeless_queues(spe, -1,
MAX_TIMESTAMP - 1);
ret = arm_spe_process_queues(spe, MAX_TIMESTAMP);
if (ret)
return ret;
if (!spe->use_ctx_pkt_for_pid)
ui__warning("Arm SPE CONTEXT packets not found in the traces.\n"
"Matching of TIDs to SPE events could be inaccurate.\n");
return 0;
}
static void arm_spe_free_queue(void *priv)
{
struct arm_spe_queue *speq = priv;
if (!speq)
return;
thread__zput(speq->thread);
arm_spe_decoder_free(speq->decoder);
zfree(&speq->event_buf);
free(speq);
}
static void arm_spe_free_events(struct perf_session *session)
{
struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
auxtrace);
struct auxtrace_queues *queues = &spe->queues;
unsigned int i;
for (i = 0; i < queues->nr_queues; i++) {
arm_spe_free_queue(queues->queue_array[i].priv);
queues->queue_array[i].priv = NULL;
}
auxtrace_queues__free(queues);
}
static void arm_spe_free(struct perf_session *session)
{
struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
auxtrace);
auxtrace_heap__free(&spe->heap);
arm_spe_free_events(session);
session->auxtrace = NULL;
free(spe);
}
static bool arm_spe_evsel_is_auxtrace(struct perf_session *session,
struct evsel *evsel)
{
struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, auxtrace);
return evsel->core.attr.type == spe->pmu_type;
}
static const char * const arm_spe_info_fmts[] = {
[ARM_SPE_PMU_TYPE] = " PMU Type %"PRId64"\n",
};
static void arm_spe_print_info(__u64 *arr)
{
if (!dump_trace)
return;
fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);
}
struct arm_spe_synth {
struct perf_tool dummy_tool;
struct perf_session *session;
};
static int arm_spe_event_synth(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct arm_spe_synth *arm_spe_synth =
container_of(tool, struct arm_spe_synth, dummy_tool);
return perf_session__deliver_synth_event(arm_spe_synth->session,
event, NULL);
}
static int arm_spe_synth_event(struct perf_session *session,
struct perf_event_attr *attr, u64 id)
{
struct arm_spe_synth arm_spe_synth;
memset(&arm_spe_synth, 0, sizeof(struct arm_spe_synth));
arm_spe_synth.session = session;
return perf_event__synthesize_attr(&arm_spe_synth.dummy_tool, attr, 1,
&id, arm_spe_event_synth);
}
static void arm_spe_set_event_name(struct evlist *evlist, u64 id,
const char *name)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.id && evsel->core.id[0] == id) {
if (evsel->name)
zfree(&evsel->name);
evsel->name = strdup(name);
break;
}
}
}
static int
arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
{
struct evlist *evlist = session->evlist;
struct evsel *evsel;
struct perf_event_attr attr;
bool found = false;
u64 id;
int err;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == spe->pmu_type) {
found = true;
break;
}
}
if (!found) {
pr_debug("No selected events with SPE trace data\n");
return 0;
}
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.size = sizeof(struct perf_event_attr);
attr.type = PERF_TYPE_HARDWARE;
attr.sample_type = evsel->core.attr.sample_type &
(PERF_SAMPLE_MASK | PERF_SAMPLE_PHYS_ADDR);
attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC |
PERF_SAMPLE_WEIGHT | PERF_SAMPLE_ADDR;
if (spe->timeless_decoding)
attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
else
attr.sample_type |= PERF_SAMPLE_TIME;
spe->sample_type = attr.sample_type;
attr.exclude_user = evsel->core.attr.exclude_user;
attr.exclude_kernel = evsel->core.attr.exclude_kernel;
attr.exclude_hv = evsel->core.attr.exclude_hv;
attr.exclude_host = evsel->core.attr.exclude_host;
attr.exclude_guest = evsel->core.attr.exclude_guest;
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
/* create new id val to be a fixed offset from evsel id */
id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
if (spe->synth_opts.flc) {
spe->sample_flc = true;
/* Level 1 data cache miss */
err = arm_spe_synth_event(session, &attr, id);
if (err)
return err;
spe->l1d_miss_id = id;
arm_spe_set_event_name(evlist, id, "l1d-miss");
id += 1;
/* Level 1 data cache access */
err = arm_spe_synth_event(session, &attr, id);
if (err)
return err;
spe->l1d_access_id = id;
arm_spe_set_event_name(evlist, id, "l1d-access");
id += 1;
}
if (spe->synth_opts.llc) {
spe->sample_llc = true;
/* Last level cache miss */
err = arm_spe_synth_event(session, &attr, id);
if (err)
return err;
spe->llc_miss_id = id;
arm_spe_set_event_name(evlist, id, "llc-miss");
id += 1;
/* Last level cache access */
err = arm_spe_synth_event(session, &attr, id);
if (err)
return err;
spe->llc_access_id = id;
arm_spe_set_event_name(evlist, id, "llc-access");
id += 1;
}
if (spe->synth_opts.tlb) {
spe->sample_tlb = true;
/* TLB miss */
err = arm_spe_synth_event(session, &attr, id);
if (err)
return err;
spe->tlb_miss_id = id;
arm_spe_set_event_name(evlist, id, "tlb-miss");
id += 1;
/* TLB access */
err = arm_spe_synth_event(session, &attr, id);
if (err)
return err;
spe->tlb_access_id = id;
arm_spe_set_event_name(evlist, id, "tlb-access");
id += 1;
}
if (spe->synth_opts.branches) {
spe->sample_branch = true;
/* Branch miss */
err = arm_spe_synth_event(session, &attr, id);
if (err)
return err;
spe->branch_miss_id = id;
arm_spe_set_event_name(evlist, id, "branch-miss");
id += 1;
}
if (spe->synth_opts.remote_access) {
spe->sample_remote_access = true;
/* Remote access */
err = arm_spe_synth_event(session, &attr, id);
if (err)
return err;
spe->remote_access_id = id;
arm_spe_set_event_name(evlist, id, "remote-access");
id += 1;
}
if (spe->synth_opts.mem) {
spe->sample_memory = true;
err = arm_spe_synth_event(session, &attr, id);
if (err)
return err;
spe->memory_id = id;
arm_spe_set_event_name(evlist, id, "memory");
id += 1;
}
if (spe->synth_opts.instructions) {
if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) {
pr_warning("Only instruction-based sampling period is currently supported by Arm SPE.\n");
goto synth_instructions_out;
}
if (spe->synth_opts.period > 1)
pr_warning("Arm SPE has a hardware-based sample period.\n"
"Additional instruction events will be discarded by --itrace\n");
spe->sample_instructions = true;
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
attr.sample_period = spe->synth_opts.period;
spe->instructions_sample_period = attr.sample_period;
err = arm_spe_synth_event(session, &attr, id);
if (err)
return err;
spe->instructions_id = id;
arm_spe_set_event_name(evlist, id, "instructions");
}
synth_instructions_out:
return 0;
}
int arm_spe_process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX;
struct perf_record_time_conv *tc = &session->time_conv;
const char *cpuid = perf_env__cpuid(session->evlist->env);
u64 midr;
struct arm_spe *spe;
int err;
/* perf_env__cpuid() returns NULL if the cpuid could not be read */
if (!cpuid)
return -EINVAL;
midr = strtol(cpuid, NULL, 16);
if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
min_sz)
return -EINVAL;
spe = zalloc(sizeof(struct arm_spe));
if (!spe)
return -ENOMEM;
err = auxtrace_queues__init(&spe->queues);
if (err)
goto err_free;
spe->session = session;
spe->machine = &session->machines.host; /* No kvm support */
spe->auxtrace_type = auxtrace_info->type;
spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
spe->midr = midr;
spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);
/*
* The synthesized event PERF_RECORD_TIME_CONV has been handled ahead
* and the parameters for hardware clock are stored in the session
* context. Passes these parameters to the struct perf_tsc_conversion
* in "spe->tc", which is used for later conversion between clock
* counter and timestamp.
*
* For backward compatibility, copies the fields starting from
* "time_cycles" only if they are contained in the event.
*/
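/*
 * Illustrative summary of the conversion implemented by
 * perf_time_to_tsc()/tsc_to_perf_time() (see tools/perf/util/tsc.c for
 * the exact code):
 *
 *	time = time_zero + ((tsc * time_mult) >> time_shift)
 *
 * with time_cycles/time_mask refining the result when only a short
 * cycle counter is exposed (cap_user_time_short).
 */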
spe->tc.time_shift = tc->time_shift;
spe->tc.time_mult = tc->time_mult;
spe->tc.time_zero = tc->time_zero;
if (event_contains(*tc, time_cycles)) {
spe->tc.time_cycles = tc->time_cycles;
spe->tc.time_mask = tc->time_mask;
spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
spe->tc.cap_user_time_short = tc->cap_user_time_short;
}
spe->auxtrace.process_event = arm_spe_process_event;
spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
spe->auxtrace.flush_events = arm_spe_flush;
spe->auxtrace.free_events = arm_spe_free_events;
spe->auxtrace.free = arm_spe_free;
spe->auxtrace.evsel_is_auxtrace = arm_spe_evsel_is_auxtrace;
session->auxtrace = &spe->auxtrace;
arm_spe_print_info(&auxtrace_info->priv[0]);
if (dump_trace)
return 0;
if (session->itrace_synth_opts && session->itrace_synth_opts->set)
spe->synth_opts = *session->itrace_synth_opts;
else
itrace_synth_opts__set_default(&spe->synth_opts, false);
err = arm_spe_synth_events(spe, session);
if (err)
goto err_free_queues;
err = auxtrace_queues__process_index(&spe->queues, session);
if (err)
goto err_free_queues;
if (spe->queues.populated)
spe->data_queued = true;
return 0;
err_free_queues:
auxtrace_queues__free(&spe->queues);
session->auxtrace = NULL;
err_free:
free(spe);
return err;
}
| linux-master | tools/perf/util/arm-spe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* File for any parts of the Coresight decoding that don't require
* OpenCSD.
*/
#include <errno.h>
#include <inttypes.h>
#include "cs-etm.h"
static const char * const cs_etm_global_header_fmts[] = {
[CS_HEADER_VERSION] = " Header version %llx\n",
[CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
[CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
};
static const char * const cs_etm_priv_fmts[] = {
[CS_ETM_MAGIC] = " Magic number %llx\n",
[CS_ETM_CPU] = " CPU %lld\n",
[CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
[CS_ETM_ETMCR] = " ETMCR %llx\n",
[CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
[CS_ETM_ETMCCER] = " ETMCCER %llx\n",
[CS_ETM_ETMIDR] = " ETMIDR %llx\n",
};
static const char * const cs_etmv4_priv_fmts[] = {
[CS_ETM_MAGIC] = " Magic number %llx\n",
[CS_ETM_CPU] = " CPU %lld\n",
[CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
[CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
[CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
[CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
[CS_ETMV4_TRCIDR1] = " TRCIDR1 %llx\n",
[CS_ETMV4_TRCIDR2] = " TRCIDR2 %llx\n",
[CS_ETMV4_TRCIDR8] = " TRCIDR8 %llx\n",
[CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
[CS_ETMV4_TS_SOURCE] = " TS_SOURCE %lld\n",
};
static const char * const cs_ete_priv_fmts[] = {
[CS_ETM_MAGIC] = " Magic number %llx\n",
[CS_ETM_CPU] = " CPU %lld\n",
[CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
[CS_ETE_TRCCONFIGR] = " TRCCONFIGR %llx\n",
[CS_ETE_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
[CS_ETE_TRCIDR0] = " TRCIDR0 %llx\n",
[CS_ETE_TRCIDR1] = " TRCIDR1 %llx\n",
[CS_ETE_TRCIDR2] = " TRCIDR2 %llx\n",
[CS_ETE_TRCIDR8] = " TRCIDR8 %llx\n",
[CS_ETE_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
[CS_ETE_TRCDEVARCH] = " TRCDEVARCH %llx\n",
[CS_ETE_TS_SOURCE] = " TS_SOURCE %lld\n",
};
static const char * const param_unk_fmt =
" Unknown parameter [%d] %"PRIx64"\n";
static const char * const magic_unk_fmt =
" Magic number Unknown %"PRIx64"\n";
static int cs_etm__print_cpu_metadata_v0(u64 *val, int *offset)
{
int i = *offset, j, nr_params = 0, fmt_offset;
u64 magic;
/* check magic value */
magic = val[i + CS_ETM_MAGIC];
if ((magic != __perf_cs_etmv3_magic) &&
(magic != __perf_cs_etmv4_magic)) {
/* failure - note bad magic value */
fprintf(stdout, magic_unk_fmt, magic);
return -EINVAL;
}
/* print common header block */
fprintf(stdout, cs_etm_priv_fmts[CS_ETM_MAGIC], val[i++]);
fprintf(stdout, cs_etm_priv_fmts[CS_ETM_CPU], val[i++]);
if (magic == __perf_cs_etmv3_magic) {
nr_params = CS_ETM_NR_TRC_PARAMS_V0;
fmt_offset = CS_ETM_ETMCR;
/* after common block, offset format index past NR_PARAMS */
for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
} else if (magic == __perf_cs_etmv4_magic) {
nr_params = CS_ETMV4_NR_TRC_PARAMS_V0;
fmt_offset = CS_ETMV4_TRCCONFIGR;
/* after common block, offset format index past NR_PARAMS */
for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
}
*offset = i;
return 0;
}
static int cs_etm__print_cpu_metadata_v1(u64 *val, int *offset)
{
int i = *offset, j, total_params = 0;
u64 magic;
magic = val[i + CS_ETM_MAGIC];
/* total params to print is NR_PARAMS + common block size for v1 */
total_params = val[i + CS_ETM_NR_TRC_PARAMS] + CS_ETM_COMMON_BLK_MAX_V1;
if (magic == __perf_cs_etmv3_magic) {
for (j = 0; j < total_params; j++, i++) {
/* if newer record - could be excess params */
if (j >= CS_ETM_PRIV_MAX)
fprintf(stdout, param_unk_fmt, j, val[i]);
else
fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
}
} else if (magic == __perf_cs_etmv4_magic) {
for (j = 0; j < total_params; j++, i++) {
/* if newer record - could be excess params */
if (j >= CS_ETMV4_PRIV_MAX)
fprintf(stdout, param_unk_fmt, j, val[i]);
else
fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
}
} else if (magic == __perf_cs_ete_magic) {
for (j = 0; j < total_params; j++, i++) {
/* if newer record - could be excess params */
if (j >= CS_ETE_PRIV_MAX)
fprintf(stdout, param_unk_fmt, j, val[i]);
else
fprintf(stdout, cs_ete_priv_fmts[j], val[i]);
}
} else {
/* failure - note bad magic value and error out */
fprintf(stdout, magic_unk_fmt, magic);
return -EINVAL;
}
*offset = i;
return 0;
}
static void cs_etm__print_auxtrace_info(u64 *val, int num)
{
int i, cpu = 0, version, err = 0;
version = val[0];
for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
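/* 'i' is advanced through each per-CPU block by the callees via *offset */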
for (i = CS_HEADER_VERSION_MAX; cpu < num; cpu++) {
if (version == 0)
err = cs_etm__print_cpu_metadata_v0(val, &i);
/* printing is the same for both, but value bit flags were added in v2 */
else if ((version == 1) || (version == 2))
err = cs_etm__print_cpu_metadata_v1(val, &i);
if (err)
return;
}
}
/*
* Do some basic checks and print the auxtrace info header before calling
* into cs_etm__process_auxtrace_info_full() which requires OpenCSD to be
* linked in. This allows some basic debugging if OpenCSD is missing.
*/
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
int event_header_size = sizeof(struct perf_event_header);
int num_cpu;
u64 *ptr = NULL;
u64 hdr_version;
if (auxtrace_info->header.size < (event_header_size + INFO_HEADER_SIZE))
return -EINVAL;
/* First the global part */
ptr = (u64 *) auxtrace_info->priv;
/* Look for version of the header */
hdr_version = ptr[0];
if (hdr_version > CS_HEADER_CURRENT_VERSION) {
pr_err("\nCS ETM Trace: Unknown Header Version = %#" PRIx64, hdr_version);
pr_err(", version supported <= %x\n", CS_HEADER_CURRENT_VERSION);
return -EINVAL;
}
if (dump_trace) {
num_cpu = ptr[CS_PMU_TYPE_CPUS] & 0xffffffff;
cs_etm__print_auxtrace_info(ptr, num_cpu);
}
return cs_etm__process_auxtrace_info_full(event, session);
}
| linux-master | tools/perf/util/cs-etm-base.c |
// SPDX-License-Identifier: GPL-2.0
#include <sys/param.h>
#include <sys/utsname.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <api/fs/fs.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include "cputopo.h"
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "pmu.h"
#include "pmus.h"
#define PACKAGE_CPUS_FMT \
"%s/devices/system/cpu/cpu%d/topology/package_cpus_list"
#define PACKAGE_CPUS_FMT_OLD \
"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define DIE_CPUS_FMT \
"%s/devices/system/cpu/cpu%d/topology/die_cpus_list"
#define CORE_CPUS_FMT \
"%s/devices/system/cpu/cpu%d/topology/core_cpus_list"
#define CORE_CPUS_FMT_OLD \
"%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
#define NODE_ONLINE_FMT \
"%s/devices/system/node/online"
#define NODE_MEMINFO_FMT \
"%s/devices/system/node/node%d/meminfo"
#define NODE_CPULIST_FMT \
"%s/devices/system/node/node%d/cpulist"
static int build_cpu_topology(struct cpu_topology *tp, int cpu)
{
FILE *fp;
char filename[MAXPATHLEN];
char *buf = NULL, *p;
size_t len = 0;
ssize_t sret;
u32 i = 0;
int ret = -1;
scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT,
sysfs__mountpoint(), cpu);
if (access(filename, F_OK) == -1) {
scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT_OLD,
sysfs__mountpoint(), cpu);
}
fp = fopen(filename, "r");
if (!fp)
goto try_dies;
sret = getline(&buf, &len, fp);
fclose(fp);
if (sret <= 0)
goto try_dies;
p = strchr(buf, '\n');
if (p)
*p = '\0';
for (i = 0; i < tp->package_cpus_lists; i++) {
if (!strcmp(buf, tp->package_cpus_list[i]))
break;
}
if (i == tp->package_cpus_lists) {
tp->package_cpus_list[i] = buf;
tp->package_cpus_lists++;
buf = NULL;
len = 0;
}
ret = 0;
try_dies:
if (!tp->die_cpus_list)
goto try_threads;
scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
sysfs__mountpoint(), cpu);
fp = fopen(filename, "r");
if (!fp)
goto try_threads;
sret = getline(&buf, &len, fp);
fclose(fp);
if (sret <= 0)
goto try_threads;
p = strchr(buf, '\n');
if (p)
*p = '\0';
for (i = 0; i < tp->die_cpus_lists; i++) {
if (!strcmp(buf, tp->die_cpus_list[i]))
break;
}
if (i == tp->die_cpus_lists) {
tp->die_cpus_list[i] = buf;
tp->die_cpus_lists++;
buf = NULL;
len = 0;
}
ret = 0;
try_threads:
scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT,
sysfs__mountpoint(), cpu);
if (access(filename, F_OK) == -1) {
scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT_OLD,
sysfs__mountpoint(), cpu);
}
fp = fopen(filename, "r");
if (!fp)
goto done;
if (getline(&buf, &len, fp) <= 0)
goto done;
p = strchr(buf, '\n');
if (p)
*p = '\0';
for (i = 0; i < tp->core_cpus_lists; i++) {
if (!strcmp(buf, tp->core_cpus_list[i]))
break;
}
if (i == tp->core_cpus_lists) {
tp->core_cpus_list[i] = buf;
tp->core_cpus_lists++;
buf = NULL;
}
ret = 0;
done:
if (fp)
fclose(fp);
free(buf);
return ret;
}
void cpu_topology__delete(struct cpu_topology *tp)
{
u32 i;
if (!tp)
return;
for (i = 0 ; i < tp->package_cpus_lists; i++)
zfree(&tp->package_cpus_list[i]);
for (i = 0 ; i < tp->die_cpus_lists; i++)
zfree(&tp->die_cpus_list[i]);
for (i = 0 ; i < tp->core_cpus_lists; i++)
zfree(&tp->core_cpus_list[i]);
free(tp);
}
bool cpu_topology__smt_on(const struct cpu_topology *topology)
{
for (u32 i = 0; i < topology->core_cpus_lists; i++) {
const char *cpu_list = topology->core_cpus_list[i];
/*
* If there is a need to separate siblings in a core then SMT is
* enabled.
*/
if (strchr(cpu_list, ',') || strchr(cpu_list, '-'))
return true;
}
return false;
}
bool cpu_topology__core_wide(const struct cpu_topology *topology,
const char *user_requested_cpu_list)
{
struct perf_cpu_map *user_requested_cpus;
/*
* If user_requested_cpu_list is empty then all CPUs are recorded and so
* core_wide is true.
*/
if (!user_requested_cpu_list)
return true;
user_requested_cpus = perf_cpu_map__new(user_requested_cpu_list);
/* Check that every user requested CPU is the complete set of SMT threads on a core. */
for (u32 i = 0; i < topology->core_cpus_lists; i++) {
const char *core_cpu_list = topology->core_cpus_list[i];
struct perf_cpu_map *core_cpus = perf_cpu_map__new(core_cpu_list);
struct perf_cpu cpu;
int idx;
bool has_first, first = true;
perf_cpu_map__for_each_cpu(cpu, idx, core_cpus) {
if (first) {
has_first = perf_cpu_map__has(user_requested_cpus, cpu);
first = false;
} else {
/*
* If the first core CPU is user requested then
* all subsequent CPUs in the core must be user
* requested too. If the first CPU isn't user
* requested then none of the others must be
* too.
*/
if (perf_cpu_map__has(user_requested_cpus, cpu) != has_first) {
perf_cpu_map__put(core_cpus);
perf_cpu_map__put(user_requested_cpus);
return false;
}
}
}
perf_cpu_map__put(core_cpus);
}
perf_cpu_map__put(user_requested_cpus);
return true;
}
static bool has_die_topology(void)
{
char filename[MAXPATHLEN];
struct utsname uts;
if (uname(&uts) < 0)
return false;
if (strncmp(uts.machine, "x86_64", 6) &&
strncmp(uts.machine, "s390x", 5))
return false;
scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
sysfs__mountpoint(), 0);
if (access(filename, F_OK) == -1)
return false;
return true;
}
const struct cpu_topology *online_topology(void)
{
static const struct cpu_topology *topology;
if (!topology) {
topology = cpu_topology__new();
if (!topology) {
pr_err("Error creating CPU topology");
abort();
}
}
return topology;
}
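/*
 * Note on the allocation scheme below: the struct and its string-list
 * arrays are carved out of a single calloc() block; package_cpus_list,
 * optionally die_cpus_list, and core_cpus_list each get 'nr' char *
 * slots laid out immediately after struct cpu_topology.
 */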
struct cpu_topology *cpu_topology__new(void)
{
struct cpu_topology *tp = NULL;
void *addr;
u32 nr, i, nr_addr;
size_t sz;
long ncpus;
int ret = -1;
struct perf_cpu_map *map;
bool has_die = has_die_topology();
ncpus = cpu__max_present_cpu().cpu;
/* build online CPU map */
map = perf_cpu_map__new(NULL);
if (map == NULL) {
pr_debug("failed to get system cpumap\n");
return NULL;
}
nr = (u32)(ncpus & UINT_MAX);
sz = nr * sizeof(char *);
if (has_die)
nr_addr = 3;
else
nr_addr = 2;
addr = calloc(1, sizeof(*tp) + nr_addr * sz);
if (!addr)
goto out_free;
tp = addr;
addr += sizeof(*tp);
tp->package_cpus_list = addr;
addr += sz;
if (has_die) {
tp->die_cpus_list = addr;
addr += sz;
}
tp->core_cpus_list = addr;
for (i = 0; i < nr; i++) {
if (!perf_cpu_map__has(map, (struct perf_cpu){ .cpu = i }))
continue;
ret = build_cpu_topology(tp, i);
if (ret < 0)
break;
}
out_free:
perf_cpu_map__put(map);
if (ret) {
cpu_topology__delete(tp);
tp = NULL;
}
return tp;
}
static int load_numa_node(struct numa_topology_node *node, int nr)
{
char str[MAXPATHLEN];
char field[32];
char *buf = NULL, *p;
size_t len = 0;
int ret = -1;
FILE *fp;
u64 mem;
node->node = (u32) nr;
scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
sysfs__mountpoint(), nr);
fp = fopen(str, "r");
if (!fp)
return -1;
while (getline(&buf, &len, fp) > 0) {
/* skip over invalid lines */
if (!strchr(buf, ':'))
continue;
if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
goto err;
if (!strcmp(field, "MemTotal:"))
node->mem_total = mem;
if (!strcmp(field, "MemFree:"))
node->mem_free = mem;
if (node->mem_total && node->mem_free)
break;
}
fclose(fp);
fp = NULL;
scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
sysfs__mountpoint(), nr);
fp = fopen(str, "r");
if (!fp)
return -1;
if (getline(&buf, &len, fp) <= 0)
goto err;
p = strchr(buf, '\n');
if (p)
*p = '\0';
node->cpus = buf;
fclose(fp);
return 0;
err:
free(buf);
if (fp)
fclose(fp);
return ret;
}
struct numa_topology *numa_topology__new(void)
{
struct perf_cpu_map *node_map = NULL;
struct numa_topology *tp = NULL;
char path[MAXPATHLEN];
char *buf = NULL;
size_t len = 0;
u32 nr, i;
FILE *fp;
char *c;
scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
sysfs__mountpoint());
fp = fopen(path, "r");
if (!fp)
return NULL;
if (getline(&buf, &len, fp) <= 0)
goto out;
c = strchr(buf, '\n');
if (c)
*c = '\0';
node_map = perf_cpu_map__new(buf);
if (!node_map)
goto out;
nr = (u32) perf_cpu_map__nr(node_map);
tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
if (!tp)
goto out;
tp->nr = nr;
for (i = 0; i < nr; i++) {
if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) {
numa_topology__delete(tp);
tp = NULL;
break;
}
}
out:
free(buf);
fclose(fp);
perf_cpu_map__put(node_map);
return tp;
}
void numa_topology__delete(struct numa_topology *tp)
{
u32 i;
for (i = 0; i < tp->nr; i++)
zfree(&tp->nodes[i].cpus);
free(tp);
}
static int load_hybrid_node(struct hybrid_topology_node *node,
struct perf_pmu *pmu)
{
char *buf = NULL, *p;
FILE *fp;
size_t len = 0;
node->pmu_name = strdup(pmu->name);
if (!node->pmu_name)
return -1;
fp = perf_pmu__open_file(pmu, "cpus");
if (!fp)
goto err;
if (getline(&buf, &len, fp) <= 0) {
fclose(fp);
goto err;
}
p = strchr(buf, '\n');
if (p)
*p = '\0';
fclose(fp);
node->cpus = buf;
return 0;
err:
zfree(&node->pmu_name);
free(buf);
return -1;
}
struct hybrid_topology *hybrid_topology__new(void)
{
struct perf_pmu *pmu = NULL;
struct hybrid_topology *tp = NULL;
int nr = perf_pmus__num_core_pmus(), i = 0;
if (nr <= 1)
return NULL;
tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
if (!tp)
return NULL;
tp->nr = nr;
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
if (load_hybrid_node(&tp->nodes[i], pmu)) {
hybrid_topology__delete(tp);
return NULL;
}
i++;
}
return tp;
}
void hybrid_topology__delete(struct hybrid_topology *tp)
{
u32 i;
for (i = 0; i < tp->nr; i++) {
zfree(&tp->nodes[i].pmu_name);
zfree(&tp->nodes[i].cpus);
}
free(tp);
}
| linux-master | tools/perf/util/cputopo.c |
// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include "debug.h"
#include "event.h"
#include <api/fs/fs.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <dirent.h>
#include <fcntl.h>
#include <inttypes.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/time64.h>
#include <linux/overflow.h>
#include <unistd.h>
#include "cap.h"
#include "strlist.h"
#include "string2.h"
/*
* XXX We need to find a better place for these things...
*/
const char *input_name;
bool perf_singlethreaded = true;
void perf_set_singlethreaded(void)
{
perf_singlethreaded = true;
}
void perf_set_multithreaded(void)
{
perf_singlethreaded = false;
}
int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
int sysctl__max_stack(void)
{
int value;
if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
sysctl_perf_event_max_stack = value;
if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
sysctl_perf_event_max_contexts_per_stack = value;
return sysctl_perf_event_max_stack;
}
bool sysctl__nmi_watchdog_enabled(void)
{
static bool cached;
static bool nmi_watchdog;
int value;
if (cached)
return nmi_watchdog;
if (sysctl__read_int("kernel/nmi_watchdog", &value) < 0)
return false;
nmi_watchdog = value > 0;
cached = true;
return nmi_watchdog;
}
bool test_attr__enabled;
bool perf_host = true;
bool perf_guest = false;
void event_attr_init(struct perf_event_attr *attr)
{
if (!perf_host)
attr->exclude_host = 1;
if (!perf_guest)
attr->exclude_guest = 1;
/* to capture ABI version */
attr->size = sizeof(*attr);
}
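/* Create directory 'path' (must be absolute) and any missing parents, like 'mkdir -p'. */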
int mkdir_p(char *path, mode_t mode)
{
struct stat st;
int err;
char *d = path;
if (*d != '/')
return -1;
if (stat(path, &st) == 0)
return 0;
while (*++d == '/');
while ((d = strchr(d, '/'))) {
*d = '\0';
err = stat(path, &st) && mkdir(path, mode);
*d++ = '/';
if (err)
return -1;
while (*d == '/')
++d;
}
return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
}
static bool match_pat(char *file, const char **pat)
{
int i = 0;
if (!pat)
return true;
while (pat[i]) {
if (strglobmatch(file, pat[i]))
return true;
i++;
}
return false;
}
/*
* The depth specify how deep the removal will go.
* 0 - will remove only files under the 'path' directory
* 1 .. x - will dive in x-level deep under the 'path' directory
*
* If specified the pat is array of string patterns ended with NULL,
* which are checked upon every file/directory found. Only matching
* ones are removed.
*
* The function returns:
* 0 on success
* -1 on removal failure with errno set
* -2 on pattern failure
*/
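/*
 * Illustrative use (mirroring rm_rf_perf_data() below): remove only the
 * "data" and "data.*" files directly under a perf.data directory:
 *
 *	const char *pat[] = { "data", "data.*", NULL };
 *	rm_rf_depth_pat("perf.data", 0, pat);
 */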
static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
{
DIR *dir;
int ret;
struct dirent *d;
char namebuf[PATH_MAX];
struct stat statbuf;
/* Do not fail if there's no file. */
ret = lstat(path, &statbuf);
if (ret)
return 0;
/* Try to remove any file we get. */
if (!(statbuf.st_mode & S_IFDIR))
return unlink(path);
/* We have directory in path. */
dir = opendir(path);
if (dir == NULL)
return -1;
while ((d = readdir(dir)) != NULL && !ret) {
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue;
if (!match_pat(d->d_name, pat)) {
ret = -2;
break;
}
scnprintf(namebuf, sizeof(namebuf), "%s/%s",
path, d->d_name);
/* We have to check symbolic link itself */
ret = lstat(namebuf, &statbuf);
if (ret < 0) {
pr_debug("stat failed: %s\n", namebuf);
break;
}
if (S_ISDIR(statbuf.st_mode))
ret = depth ? rm_rf_depth_pat(namebuf, depth - 1, pat) : 0;
else
ret = unlink(namebuf);
}
closedir(dir);
if (ret < 0)
return ret;
return rmdir(path);
}
static int rm_rf_a_kcore_dir(const char *path, const char *name)
{
char kcore_dir_path[PATH_MAX];
const char *pat[] = {
"kcore",
"kallsyms",
"modules",
NULL,
};
snprintf(kcore_dir_path, sizeof(kcore_dir_path), "%s/%s", path, name);
return rm_rf_depth_pat(kcore_dir_path, 0, pat);
}
static bool kcore_dir_filter(const char *name __maybe_unused, struct dirent *d)
{
const char *pat[] = {
"kcore_dir",
"kcore_dir__[1-9]*",
NULL,
};
return match_pat(d->d_name, pat);
}
static int rm_rf_kcore_dir(const char *path)
{
struct strlist *kcore_dirs;
struct str_node *nd;
int ret;
kcore_dirs = lsdir(path, kcore_dir_filter);
if (!kcore_dirs)
return 0;
strlist__for_each_entry(nd, kcore_dirs) {
ret = rm_rf_a_kcore_dir(path, nd->s);
if (ret)
return ret;
}
strlist__delete(kcore_dirs);
return 0;
}
int rm_rf_perf_data(const char *path)
{
const char *pat[] = {
"data",
"data.*",
NULL,
};
rm_rf_kcore_dir(path);
return rm_rf_depth_pat(path, 0, pat);
}
int rm_rf(const char *path)
{
return rm_rf_depth_pat(path, INT_MAX, NULL);
}
/* A filter which removes dot files */
bool lsdir_no_dot_filter(const char *name __maybe_unused, struct dirent *d)
{
return d->d_name[0] != '.';
}
/* lsdir reads a directory and store it in strlist */
struct strlist *lsdir(const char *name,
bool (*filter)(const char *, struct dirent *))
{
struct strlist *list = NULL;
DIR *dir;
struct dirent *d;
dir = opendir(name);
if (!dir)
return NULL;
list = strlist__new(NULL, NULL);
if (!list) {
errno = ENOMEM;
goto out;
}
while ((d = readdir(dir)) != NULL) {
if (!filter || filter(name, d))
strlist__add(list, d->d_name);
}
out:
closedir(dir);
return list;
}
size_t hex_width(u64 v)
{
size_t n = 1;
while ((v >>= 4))
++n;
return n;
}
int perf_event_paranoid(void)
{
int value;
if (sysctl__read_int("kernel/perf_event_paranoid", &value))
return INT_MAX;
return value;
}
bool perf_event_paranoid_check(int max_level)
{
return perf_cap__capable(CAP_SYS_ADMIN) ||
perf_cap__capable(CAP_PERFMON) ||
perf_event_paranoid() <= max_level;
}
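/*
 * Ubuntu kernels ship /proc/version_signature, whose last space-separated
 * field is the upstream kernel version, e.g. (illustrative content):
 *
 *	Ubuntu 5.4.0-42.46-generic 5.4.44
 */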
static int
fetch_ubuntu_kernel_version(unsigned int *puint)
{
ssize_t len;
size_t line_len = 0;
char *ptr, *line = NULL;
int version, patchlevel, sublevel, err;
FILE *vsig;
if (!puint)
return 0;
vsig = fopen("/proc/version_signature", "r");
if (!vsig) {
pr_debug("Open /proc/version_signature failed: %s\n",
strerror(errno));
return -1;
}
len = getline(&line, &line_len, vsig);
fclose(vsig);
err = -1;
if (len <= 0) {
pr_debug("Reading from /proc/version_signature failed: %s\n",
strerror(errno));
goto errout;
}
ptr = strrchr(line, ' ');
if (!ptr) {
pr_debug("Parsing /proc/version_signature failed: %s\n", line);
goto errout;
}
err = sscanf(ptr + 1, "%d.%d.%d",
&version, &patchlevel, &sublevel);
if (err != 3) {
pr_debug("Unable to get kernel version from /proc/version_signature '%s'\n",
line);
goto errout;
}
*puint = (version << 16) + (patchlevel << 8) + sublevel;
err = 0;
errout:
free(line);
return err;
}
int
fetch_kernel_version(unsigned int *puint, char *str,
size_t str_size)
{
struct utsname utsname;
int version, patchlevel, sublevel, err;
bool int_ver_ready = false;
if (access("/proc/version_signature", R_OK) == 0)
if (!fetch_ubuntu_kernel_version(puint))
int_ver_ready = true;
if (uname(&utsname))
return -1;
if (str && str_size) {
strncpy(str, utsname.release, str_size);
str[str_size - 1] = '\0';
}
if (!puint || int_ver_ready)
return 0;
err = sscanf(utsname.release, "%d.%d.%d",
&version, &patchlevel, &sublevel);
if (err != 3) {
pr_debug("Unable to get kernel version from uname '%s'\n",
utsname.release);
return -1;
}
*puint = (version << 16) + (patchlevel << 8) + sublevel;
return 0;
}
int perf_tip(char **strp, const char *dirpath)
{
struct strlist *tips;
struct str_node *node;
struct strlist_config conf = {
.dirname = dirpath,
.file_only = true,
};
int ret = 0;
*strp = NULL;
tips = strlist__new("tips.txt", &conf);
if (tips == NULL)
return -errno;
if (strlist__nr_entries(tips) == 0)
goto out;
node = strlist__entry(tips, random() % strlist__nr_entries(tips));
if (asprintf(strp, "Tip: %s", node->s) < 0)
ret = -ENOMEM;
out:
strlist__delete(tips);
return ret;
}
char *perf_exe(char *buf, int len)
{
int n = readlink("/proc/self/exe", buf, len);
if (n > 0) {
buf[n] = 0;
return buf;
}
return strcpy(buf, "perf");
}
void perf_debuginfod_setup(struct perf_debuginfod *di)
{
/*
* By default '!di->set' we clear DEBUGINFOD_URLS, so debuginfod
* processing is not triggered, otherwise we set it to 'di->urls'
* value. If 'di->urls' is "system" we keep DEBUGINFOD_URLS value.
*/
if (!di->set)
setenv("DEBUGINFOD_URLS", "", 1);
else if (di->urls && strcmp(di->urls, "system"))
setenv("DEBUGINFOD_URLS", di->urls, 1);
pr_debug("DEBUGINFOD_URLS=%s\n", getenv("DEBUGINFOD_URLS"));
#ifndef HAVE_DEBUGINFOD_SUPPORT
if (di->set)
pr_warning("WARNING: debuginfod support requested, but perf is not built with it\n");
#endif
}
/*
* Return a new filename prepended with task's root directory if it's in
* a chroot. Callers should free the returned string.
*/
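/*
 * E.g. for a task chrooted into /srv/jail (illustrative path), a map of
 * "/usr/lib/libc.so" resolves to "/srv/jail/usr/lib/libc.so".
 */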
char *filename_with_chroot(int pid, const char *filename)
{
char buf[PATH_MAX];
char proc_root[32];
char *new_name = NULL;
int ret;
scnprintf(proc_root, sizeof(proc_root), "/proc/%d/root", pid);
ret = readlink(proc_root, buf, sizeof(buf) - 1);
if (ret <= 0)
return NULL;
/* readlink(2) does not append a null byte to buf */
buf[ret] = '\0';
if (!strcmp(buf, "/"))
return NULL;
if (strstr(buf, "(deleted)"))
return NULL;
if (asprintf(&new_name, "%s/%s", buf, filename) < 0)
return NULL;
return new_name;
}
/*
* Reallocate an array *arr of size *arr_sz so that it is big enough to contain
* x elements of size msz, initializing new entries to *init_val or zero if
* init_val is NULL
*/
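/*
 * Illustrative use: grow an int array so that index 100 is valid,
 * zero-filling the new entries (hypothetical variables):
 *
 *	int *vals = NULL;
 *	size_t vals_sz = 0;
 *
 *	if (do_realloc_array_as_needed((void **)&vals, &vals_sz, 100,
 *				       sizeof(*vals), NULL))
 *		return -ENOMEM;
 */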
int do_realloc_array_as_needed(void **arr, size_t *arr_sz, size_t x, size_t msz, const void *init_val)
{
size_t new_sz = *arr_sz;
void *new_arr;
size_t i;
if (!new_sz)
new_sz = msz >= 64 ? 1 : roundup(64, msz) / msz; /* Start with at least 64 bytes */
while (x >= new_sz) {
if (check_mul_overflow(new_sz, (size_t)2, &new_sz))
return -ENOMEM;
}
if (new_sz == *arr_sz)
return 0;
new_arr = calloc(new_sz, msz);
if (!new_arr)
return -ENOMEM;
if (*arr_sz)
memcpy(new_arr, *arr, *arr_sz * msz);
if (init_val) {
for (i = *arr_sz; i < new_sz; i++)
memcpy(new_arr + (i * msz), init_val, msz);
}
*arr = new_arr;
*arr_sz = new_sz;
return 0;
}
#ifndef HAVE_SCHED_GETCPU_SUPPORT
int sched_getcpu(void)
{
#ifdef __NR_getcpu
unsigned int cpu;
int err = syscall(__NR_getcpu, &cpu, NULL, NULL);
if (!err)
return cpu;
#else
errno = ENOSYS;
#endif
return -1;
}
#endif
| linux-master | tools/perf/util/util.c |
// SPDX-License-Identifier: GPL-2.0
#include "debug.h"
#include "dsos.h"
#include "dso.h"
#include "util.h"
#include "vdso.h"
#include "namespaces.h"
#include <errno.h>
#include <libgen.h>
#include <stdlib.h>
#include <string.h>
#include <symbol.h> // filename__read_build_id
#include <unistd.h>
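/*
 * Compare two dso ids field by field. Note the descending convention:
 * -1 is returned when a's field is the larger one.
 */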
static int __dso_id__cmp(struct dso_id *a, struct dso_id *b)
{
if (a->maj > b->maj) return -1;
if (a->maj < b->maj) return 1;
if (a->min > b->min) return -1;
if (a->min < b->min) return 1;
if (a->ino > b->ino) return -1;
if (a->ino < b->ino) return 1;
/*
* Synthesized MMAP events have zero ino_generation, avoid comparing
* them with MMAP events with actual ino_generation.
*
* I found it harmful because the mismatch resulted in a new
* dso that did not have a build ID whereas the original dso did have a
* build ID. The build ID was essential because the object was not found
* otherwise. - Adrian
*/
if (a->ino_generation && b->ino_generation) {
if (a->ino_generation > b->ino_generation) return -1;
if (a->ino_generation < b->ino_generation) return 1;
}
return 0;
}
static bool dso_id__empty(struct dso_id *id)
{
if (!id)
return true;
return !id->maj && !id->min && !id->ino && !id->ino_generation;
}
static void dso__inject_id(struct dso *dso, struct dso_id *id)
{
dso->id.maj = id->maj;
dso->id.min = id->min;
dso->id.ino = id->ino;
dso->id.ino_generation = id->ino_generation;
}
static int dso_id__cmp(struct dso_id *a, struct dso_id *b)
{
/*
* The second is always dso->id, so zeroes if not set, assume passing
* NULL for a means a zeroed id
*/
if (dso_id__empty(a) || dso_id__empty(b))
return 0;
return __dso_id__cmp(a, b);
}
int dso__cmp_id(struct dso *a, struct dso *b)
{
return __dso_id__cmp(&a->id, &b->id);
}
bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
bool have_build_id = false;
struct dso *pos;
struct nscookie nsc;
list_for_each_entry(pos, head, node) {
if (with_hits && !pos->hit && !dso__is_vdso(pos))
continue;
if (pos->has_build_id) {
have_build_id = true;
continue;
}
nsinfo__mountns_enter(pos->nsinfo, &nsc);
if (filename__read_build_id(pos->long_name, &pos->bid) > 0) {
have_build_id = true;
pos->has_build_id = true;
} else if (errno == ENOENT && pos->nsinfo) {
char *new_name = dso__filename_with_chroot(pos, pos->long_name);
if (new_name && filename__read_build_id(new_name,
&pos->bid) > 0) {
have_build_id = true;
pos->has_build_id = true;
}
free(new_name);
}
nsinfo__mountns_exit(&nsc);
}
return have_build_id;
}
static int __dso__cmp_long_name(const char *long_name, struct dso_id *id, struct dso *b)
{
int rc = strcmp(long_name, b->long_name);
return rc ?: dso_id__cmp(id, &b->id);
}
static int __dso__cmp_short_name(const char *short_name, struct dso_id *id, struct dso *b)
{
int rc = strcmp(short_name, b->short_name);
return rc ?: dso_id__cmp(id, &b->id);
}
static int dso__cmp_short_name(struct dso *a, struct dso *b)
{
return __dso__cmp_short_name(a->short_name, &a->id, b);
}
/*
* Find a matching entry and/or link current entry to RB tree.
* Either one of the dso or name parameter must be non-NULL or the
* function will not work.
*/
struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso,
const char *name, struct dso_id *id)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
if (!name)
name = dso->long_name;
/*
* Find node with the matching name
*/
while (*p) {
struct dso *this = rb_entry(*p, struct dso, rb_node);
int rc = __dso__cmp_long_name(name, id, this);
parent = *p;
if (rc == 0) {
/*
* In case the new DSO is a duplicate of an existing
* one, print a one-time warning & put the new entry
* at the end of the list of duplicates.
*/
if (!dso || (dso == this))
return this; /* Find matching dso */
/*
* The core kernel DSOs may have duplicated long name.
* In this case, the short name should be different.
* Comparing the short names to differentiate the DSOs.
*/
rc = dso__cmp_short_name(dso, this);
if (rc == 0) {
pr_err("Duplicated dso name: %s\n", name);
return NULL;
}
}
if (rc < 0)
p = &parent->rb_left;
else
p = &parent->rb_right;
}
if (dso) {
/* Add new node and rebalance tree */
rb_link_node(&dso->rb_node, parent, p);
rb_insert_color(&dso->rb_node, root);
dso->root = root;
}
return NULL;
}
void __dsos__add(struct dsos *dsos, struct dso *dso)
{
list_add_tail(&dso->node, &dsos->head);
__dsos__findnew_link_by_longname_id(&dsos->root, dso, NULL, &dso->id);
/*
* It is now in the linked list, grab a reference, then garbage collect
* this when needing memory, by looking at LRU dso instances in the
* list with atomic_read(&dso->refcnt) == 1, i.e. no references
* anywhere besides the one for the list, do, under a lock for the
* list: remove it from the list, then a dso__put(), that probably will
* be the last and will then call dso__delete(), end of life.
*
* That, or at the end of the 'struct machine' lifetime, when all
* 'struct dso' instances will be removed from the list, in
* dsos__exit(), if they have no other reference from some other data
* structure.
*
* E.g.: after processing a 'perf.data' file and storing references
* to objects instantiated while processing events, we will have
* references to the 'thread', 'map', 'dso' structs all from 'struct
* hist_entry' instances, but we may not need anything not referenced,
* so we might as well call machines__exit()/machines__delete() and
* garbage collect it.
*/
dso__get(dso);
}
void dsos__add(struct dsos *dsos, struct dso *dso)
{
down_write(&dsos->lock);
__dsos__add(dsos, dso);
up_write(&dsos->lock);
}
static struct dso *__dsos__findnew_by_longname_id(struct rb_root *root, const char *name, struct dso_id *id)
{
return __dsos__findnew_link_by_longname_id(root, NULL, name, id);
}
static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id, bool cmp_short)
{
struct dso *pos;
if (cmp_short) {
list_for_each_entry(pos, &dsos->head, node)
if (__dso__cmp_short_name(name, id, pos) == 0)
return pos;
return NULL;
}
return __dsos__findnew_by_longname_id(&dsos->root, name, id);
}
struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
return __dsos__find_id(dsos, name, NULL, cmp_short);
}
static void dso__set_basename(struct dso *dso)
{
char *base, *lname;
int tid;
if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
if (asprintf(&base, "[JIT] tid %d", tid) < 0)
return;
} else {
/*
* basename() may modify path buffer, so we must pass
* a copy.
*/
lname = strdup(dso->long_name);
if (!lname)
return;
/*
* basename() may return a pointer to internal
* storage which is reused in subsequent calls
* so copy the result.
*/
base = strdup(basename(lname));
free(lname);
if (!base)
return;
}
dso__set_short_name(dso, base, true);
}
static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
{
struct dso *dso = dso__new_id(name, id);
if (dso != NULL) {
__dsos__add(dsos, dso);
dso__set_basename(dso);
/* Put dso here because __dsos_add already got it */
dso__put(dso);
}
return dso;
}
struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
return __dsos__addnew_id(dsos, name, NULL);
}
static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
{
struct dso *dso = __dsos__find_id(dsos, name, id, false);
if (dso && dso_id__empty(&dso->id) && !dso_id__empty(id))
dso__inject_id(dso, id);
return dso ? dso : __dsos__addnew_id(dsos, name, id);
}
struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
{
struct dso *dso;
down_write(&dsos->lock);
dso = dso__get(__dsos__findnew_id(dsos, name, id));
up_write(&dsos->lock);
return dso;
}
size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm)
{
struct dso *pos;
size_t ret = 0;
list_for_each_entry(pos, head, node) {
char sbuild_id[SBUILD_ID_SIZE];
if (skip && skip(pos, parm))
continue;
build_id__sprintf(&pos->bid, sbuild_id);
ret += fprintf(fp, "%-40s %s\n", sbuild_id, pos->long_name);
}
return ret;
}
size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
struct dso *pos;
size_t ret = 0;
list_for_each_entry(pos, head, node) {
ret += dso__fprintf(pos, fp);
}
return ret;
}
| linux-master | tools/perf/util/dsos.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include "util/evsel_fprintf.h"
#include "trace-event.h"
struct bit_names {
int bit;
const char *name;
};
static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
bool first_bit = true;
int i = 0;
do {
if (value & bits[i].bit) {
buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
first_bit = false;
}
} while (bits[++i].name != NULL);
}
static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
struct bit_names bits[] = {
bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
bit_name(WEIGHT), bit_name(PHYS_ADDR), bit_name(AUX),
bit_name(CGROUP), bit_name(DATA_PAGE_SIZE), bit_name(CODE_PAGE_SIZE),
bit_name(WEIGHT_STRUCT),
{ .name = NULL, }
};
#undef bit_name
__p_bits(buf, size, value, bits);
}
static void __p_branch_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
struct bit_names bits[] = {
bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
bit_name(TYPE_SAVE), bit_name(HW_INDEX), bit_name(PRIV_SAVE),
{ .name = NULL, }
};
#undef bit_name
__p_bits(buf, size, value, bits);
}
static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
struct bit_names bits[] = {
bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
bit_name(ID), bit_name(GROUP), bit_name(LOST),
{ .name = NULL, }
};
#undef bit_name
__p_bits(buf, size, value, bits);
}
#define ENUM_ID_TO_STR_CASE(x) case x: return (#x);
static const char *stringify_perf_type_id(u64 value)
{
switch (value) {
ENUM_ID_TO_STR_CASE(PERF_TYPE_HARDWARE)
ENUM_ID_TO_STR_CASE(PERF_TYPE_SOFTWARE)
ENUM_ID_TO_STR_CASE(PERF_TYPE_TRACEPOINT)
ENUM_ID_TO_STR_CASE(PERF_TYPE_HW_CACHE)
ENUM_ID_TO_STR_CASE(PERF_TYPE_RAW)
ENUM_ID_TO_STR_CASE(PERF_TYPE_BREAKPOINT)
default:
return NULL;
}
}
static const char *stringify_perf_hw_id(u64 value)
{
switch (value) {
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CPU_CYCLES)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_INSTRUCTIONS)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_REFERENCES)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_MISSES)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_BRANCH_MISSES)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_BUS_CYCLES)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_STALLED_CYCLES_FRONTEND)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_STALLED_CYCLES_BACKEND)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_REF_CPU_CYCLES)
default:
return NULL;
}
}
static const char *stringify_perf_hw_cache_id(u64 value)
{
switch (value) {
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_L1D)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_L1I)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_LL)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_DTLB)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_ITLB)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_BPU)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_NODE)
default:
return NULL;
}
}
static const char *stringify_perf_hw_cache_op_id(u64 value)
{
switch (value) {
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_OP_READ)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_OP_WRITE)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_OP_PREFETCH)
default:
return NULL;
}
}
static const char *stringify_perf_hw_cache_op_result_id(u64 value)
{
switch (value) {
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_RESULT_ACCESS)
ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_RESULT_MISS)
default:
return NULL;
}
}
static const char *stringify_perf_sw_id(u64 value)
{
switch (value) {
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_CPU_CLOCK)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_TASK_CLOCK)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_PAGE_FAULTS)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_CONTEXT_SWITCHES)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_CPU_MIGRATIONS)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_PAGE_FAULTS_MIN)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_PAGE_FAULTS_MAJ)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_ALIGNMENT_FAULTS)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_EMULATION_FAULTS)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_DUMMY)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_BPF_OUTPUT)
ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_CGROUP_SWITCHES)
default:
return NULL;
}
}
#undef ENUM_ID_TO_STR_CASE
#define PRINT_ID(_s, _f) \
do { \
const char *__s = _s; \
if (__s == NULL) \
snprintf(buf, size, _f, value); \
else \
snprintf(buf, size, _f" (%s)", value, __s); \
} while (0)
#define print_id_unsigned(_s) PRINT_ID(_s, "%"PRIu64)
#define print_id_hex(_s) PRINT_ID(_s, "%#"PRIx64)
static void __p_type_id(char *buf, size_t size, u64 value)
{
print_id_unsigned(stringify_perf_type_id(value));
}
static void __p_config_hw_id(char *buf, size_t size, u64 value)
{
print_id_hex(stringify_perf_hw_id(value));
}
static void __p_config_sw_id(char *buf, size_t size, u64 value)
{
print_id_hex(stringify_perf_sw_id(value));
}
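/*
 * PERF_TYPE_HW_CACHE encodes its config as (id) | (op << 8) |
 * (result << 16); decode and stringify each byte below.
 */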
static void __p_config_hw_cache_id(char *buf, size_t size, u64 value)
{
const char *hw_cache_str = stringify_perf_hw_cache_id(value & 0xff);
const char *hw_cache_op_str =
stringify_perf_hw_cache_op_id((value & 0xff00) >> 8);
const char *hw_cache_op_result_str =
stringify_perf_hw_cache_op_result_id((value & 0xff0000) >> 16);
if (hw_cache_str == NULL || hw_cache_op_str == NULL ||
hw_cache_op_result_str == NULL) {
snprintf(buf, size, "%#"PRIx64, value);
} else {
snprintf(buf, size, "%#"PRIx64" (%s | %s | %s)", value,
hw_cache_op_result_str, hw_cache_op_str, hw_cache_str);
}
}
#ifdef HAVE_LIBTRACEEVENT
static void __p_config_tracepoint_id(char *buf, size_t size, u64 value)
{
char *str = tracepoint_id_to_name(value);
print_id_hex(str);
free(str);
}
#endif
static void __p_config_id(char *buf, size_t size, u32 type, u64 value)
{
switch (type) {
case PERF_TYPE_HARDWARE:
return __p_config_hw_id(buf, size, value);
case PERF_TYPE_SOFTWARE:
return __p_config_sw_id(buf, size, value);
case PERF_TYPE_HW_CACHE:
return __p_config_hw_cache_id(buf, size, value);
case PERF_TYPE_TRACEPOINT:
#ifdef HAVE_LIBTRACEEVENT
return __p_config_tracepoint_id(buf, size, value);
#endif
case PERF_TYPE_RAW:
case PERF_TYPE_BREAKPOINT:
default:
snprintf(buf, size, "%#"PRIx64, value);
return;
}
}
#define BUF_SIZE 1024
#define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
#define p_type_id(val) __p_type_id(buf, BUF_SIZE, val)
#define p_config_id(val) __p_config_id(buf, BUF_SIZE, attr->type, val)
#define PRINT_ATTRn(_n, _f, _p, _a) \
do { \
if (_a || attr->_f) { \
_p(attr->_f); \
ret += attr__fprintf(fp, _n, buf, priv);\
} \
} while (0)
#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p, false)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv)
{
char buf[BUF_SIZE];
int ret = 0;
PRINT_ATTRn("type", type, p_type_id, true);
PRINT_ATTRf(size, p_unsigned);
PRINT_ATTRn("config", config, p_config_id, true);
PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned, false);
PRINT_ATTRf(sample_type, p_sample_type);
PRINT_ATTRf(read_format, p_read_format);
PRINT_ATTRf(disabled, p_unsigned);
PRINT_ATTRf(inherit, p_unsigned);
PRINT_ATTRf(pinned, p_unsigned);
PRINT_ATTRf(exclusive, p_unsigned);
PRINT_ATTRf(exclude_user, p_unsigned);
PRINT_ATTRf(exclude_kernel, p_unsigned);
PRINT_ATTRf(exclude_hv, p_unsigned);
PRINT_ATTRf(exclude_idle, p_unsigned);
PRINT_ATTRf(mmap, p_unsigned);
PRINT_ATTRf(comm, p_unsigned);
PRINT_ATTRf(freq, p_unsigned);
PRINT_ATTRf(inherit_stat, p_unsigned);
PRINT_ATTRf(enable_on_exec, p_unsigned);
PRINT_ATTRf(task, p_unsigned);
PRINT_ATTRf(watermark, p_unsigned);
PRINT_ATTRf(precise_ip, p_unsigned);
PRINT_ATTRf(mmap_data, p_unsigned);
PRINT_ATTRf(sample_id_all, p_unsigned);
PRINT_ATTRf(exclude_host, p_unsigned);
PRINT_ATTRf(exclude_guest, p_unsigned);
PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
PRINT_ATTRf(exclude_callchain_user, p_unsigned);
PRINT_ATTRf(mmap2, p_unsigned);
PRINT_ATTRf(comm_exec, p_unsigned);
PRINT_ATTRf(use_clockid, p_unsigned);
PRINT_ATTRf(context_switch, p_unsigned);
PRINT_ATTRf(write_backward, p_unsigned);
PRINT_ATTRf(namespaces, p_unsigned);
PRINT_ATTRf(ksymbol, p_unsigned);
PRINT_ATTRf(bpf_event, p_unsigned);
PRINT_ATTRf(aux_output, p_unsigned);
PRINT_ATTRf(cgroup, p_unsigned);
PRINT_ATTRf(text_poke, p_unsigned);
PRINT_ATTRf(build_id, p_unsigned);
PRINT_ATTRf(inherit_thread, p_unsigned);
PRINT_ATTRf(remove_on_exec, p_unsigned);
PRINT_ATTRf(sigtrap, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned, false);
PRINT_ATTRf(bp_type, p_unsigned);
PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex, false);
PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex, false);
PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
PRINT_ATTRf(sample_regs_user, p_hex);
PRINT_ATTRf(sample_stack_user, p_unsigned);
PRINT_ATTRf(clockid, p_signed);
PRINT_ATTRf(sample_regs_intr, p_hex);
PRINT_ATTRf(aux_watermark, p_unsigned);
PRINT_ATTRf(sample_max_stack, p_unsigned);
PRINT_ATTRf(aux_sample_size, p_unsigned);
PRINT_ATTRf(sig_data, p_unsigned);
return ret;
}
| linux-master | tools/perf/util/perf_event_attr_fprintf.c |
// SPDX-License-Identifier: GPL-2.0
#include "cache.h"
#include "config.h"
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <subcmd/help.h>
#include "../builtin.h"
#include "levenshtein.h"
#include <linux/zalloc.h>
static int autocorrect;
static int perf_unknown_cmd_config(const char *var, const char *value,
void *cb __maybe_unused)
{
if (!strcmp(var, "help.autocorrect"))
return perf_config_int(&autocorrect, var, value);
return 0;
}
static int levenshtein_compare(const void *p1, const void *p2)
{
const struct cmdname *const *c1 = p1, *const *c2 = p2;
const char *s1 = (*c1)->name, *s2 = (*c2)->name;
int l1 = (*c1)->len;
int l2 = (*c2)->len;
return l1 != l2 ? l1 - l2 : strcmp(s1, s2);
}
static int add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
{
unsigned int i, nr = cmds->cnt + old->cnt;
void *tmp;
if (nr > cmds->alloc) {
/* Choose bigger one to alloc */
if (alloc_nr(cmds->alloc) < nr)
cmds->alloc = nr;
else
cmds->alloc = alloc_nr(cmds->alloc);
tmp = realloc(cmds->names, cmds->alloc * sizeof(*cmds->names));
if (!tmp)
return -1;
cmds->names = tmp;
}
for (i = 0; i < old->cnt; i++)
cmds->names[cmds->cnt++] = old->names[i];
zfree(&old->names);
old->cnt = 0;
return 0;
}
const char *help_unknown_cmd(const char *cmd)
{
unsigned int i, n = 0, best_similarity = 0;
struct cmdnames main_cmds, other_cmds;
memset(&main_cmds, 0, sizeof(main_cmds));
memset(&other_cmds, 0, sizeof(other_cmds));
perf_config(perf_unknown_cmd_config, NULL);
load_command_list("perf-", &main_cmds, &other_cmds);
if (add_cmd_list(&main_cmds, &other_cmds) < 0) {
fprintf(stderr, "ERROR: Failed to allocate command list for unknown command.\n");
goto end;
}
qsort(main_cmds.names, main_cmds.cnt,
sizeof(*main_cmds.names), cmdname_compare);
uniq(&main_cmds);
if (main_cmds.cnt) {
/* This reuses cmdname->len for similarity index */
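/*
 * The 0, 2, 1, 4 weights below are assumed to be levenshtein()'s swap,
 * substitution, addition and deletion costs, in that order.
 */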
for (i = 0; i < main_cmds.cnt; ++i)
main_cmds.names[i]->len =
levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4);
qsort(main_cmds.names, main_cmds.cnt,
sizeof(*main_cmds.names), levenshtein_compare);
best_similarity = main_cmds.names[0]->len;
n = 1;
while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len)
++n;
}
if (autocorrect && n == 1) {
const char *assumed = main_cmds.names[0]->name;
main_cmds.names[0] = NULL;
clean_cmdnames(&main_cmds);
clean_cmdnames(&other_cmds);
fprintf(stderr, "WARNING: You called a perf program named '%s', "
"which does not exist.\n"
"Continuing under the assumption that you meant '%s'\n",
cmd, assumed);
if (autocorrect > 0) {
fprintf(stderr, "in %0.1f seconds automatically...\n",
(float)autocorrect/10.0);
poll(NULL, 0, autocorrect * 100);
}
return assumed;
}
fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd);
if (main_cmds.cnt && best_similarity < 6) {
fprintf(stderr, "\nDid you mean %s?\n",
n < 2 ? "this": "one of these");
for (i = 0; i < n; i++)
fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
}
end:
clean_cmdnames(&main_cmds);
clean_cmdnames(&other_cmds);
exit(1);
}
| linux-master | tools/perf/util/help-unknown-cmd.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <stdlib.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <bpf/bpf.h>
#include "bpf-utils.h"
#include "debug.h"
struct bpil_array_desc {
int array_offset; /* e.g. offset of jited_prog_insns */
int count_offset; /* e.g. offset of jited_prog_len */
int size_offset; /* > 0: offset of rec size,
* < 0: fixed record size equal to -size_offset
*/
};
static struct bpil_array_desc bpil_array_desc[] = {
[PERF_BPIL_JITED_INSNS] = {
offsetof(struct bpf_prog_info, jited_prog_insns),
offsetof(struct bpf_prog_info, jited_prog_len),
-1,
},
[PERF_BPIL_XLATED_INSNS] = {
offsetof(struct bpf_prog_info, xlated_prog_insns),
offsetof(struct bpf_prog_info, xlated_prog_len),
-1,
},
[PERF_BPIL_MAP_IDS] = {
offsetof(struct bpf_prog_info, map_ids),
offsetof(struct bpf_prog_info, nr_map_ids),
-(int)sizeof(__u32),
},
[PERF_BPIL_JITED_KSYMS] = {
offsetof(struct bpf_prog_info, jited_ksyms),
offsetof(struct bpf_prog_info, nr_jited_ksyms),
-(int)sizeof(__u64),
},
[PERF_BPIL_JITED_FUNC_LENS] = {
offsetof(struct bpf_prog_info, jited_func_lens),
offsetof(struct bpf_prog_info, nr_jited_func_lens),
-(int)sizeof(__u32),
},
[PERF_BPIL_FUNC_INFO] = {
offsetof(struct bpf_prog_info, func_info),
offsetof(struct bpf_prog_info, nr_func_info),
offsetof(struct bpf_prog_info, func_info_rec_size),
},
[PERF_BPIL_LINE_INFO] = {
offsetof(struct bpf_prog_info, line_info),
offsetof(struct bpf_prog_info, nr_line_info),
offsetof(struct bpf_prog_info, line_info_rec_size),
},
[PERF_BPIL_JITED_LINE_INFO] = {
offsetof(struct bpf_prog_info, jited_line_info),
offsetof(struct bpf_prog_info, nr_jited_line_info),
offsetof(struct bpf_prog_info, jited_line_info_rec_size),
},
[PERF_BPIL_PROG_TAGS] = {
offsetof(struct bpf_prog_info, prog_tags),
offsetof(struct bpf_prog_info, nr_prog_tags),
-(int)sizeof(__u8) * BPF_TAG_SIZE,
},
};
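/*
 * Note on the encoding above: a negative size_offset carries a fixed
 * record size directly (e.g. -(int)sizeof(__u32) for PERF_BPIL_MAP_IDS),
 * while a positive one is the offset of a *_rec_size field read at
 * runtime; the read_offset helpers below return -offset in the negative
 * case.
 */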
static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
int offset)
{
__u32 *array = (__u32 *)info;
if (offset >= 0)
return array[offset / sizeof(__u32)];
return -(int)offset;
}
static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
int offset)
{
__u64 *array = (__u64 *)info;
if (offset >= 0)
return array[offset / sizeof(__u64)];
return -(int)offset;
}
static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
__u32 val)
{
__u32 *array = (__u32 *)info;
if (offset >= 0)
array[offset / sizeof(__u32)] = val;
}
static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
__u64 val)
{
__u64 *array = (__u64 *)info;
if (offset >= 0)
array[offset / sizeof(__u64)] = val;
}
struct perf_bpil *
get_bpf_prog_info_linear(int fd, __u64 arrays)
{
struct bpf_prog_info info = {};
struct perf_bpil *info_linear;
__u32 info_len = sizeof(info);
__u32 data_len = 0;
int i, err;
void *ptr;
if (arrays >> PERF_BPIL_LAST_ARRAY)
return ERR_PTR(-EINVAL);
/* step 1: get array dimensions */
err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
if (err) {
pr_debug("can't get prog info: %s", strerror(errno));
return ERR_PTR(-EFAULT);
}
/* step 2: calculate total size of all arrays */
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
bool include_array = (arrays & (1UL << i)) > 0;
struct bpil_array_desc *desc;
__u32 count, size;
desc = bpil_array_desc + i;
/* kernel is too old to support this field */
if (info_len < desc->array_offset + sizeof(__u32) ||
info_len < desc->count_offset + sizeof(__u32) ||
(desc->size_offset > 0 && info_len < (__u32)desc->size_offset))
include_array = false;
if (!include_array) {
arrays &= ~(1UL << i); /* clear the bit */
continue;
}
count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
data_len += roundup(count * size, sizeof(__u64));
}
/* step 3: allocate continuous memory */
info_linear = malloc(sizeof(struct perf_bpil) + data_len);
if (!info_linear)
return ERR_PTR(-ENOMEM);
/* step 4: fill data to info_linear->info */
info_linear->arrays = arrays;
memset(&info_linear->info, 0, sizeof(info));
ptr = info_linear->data;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
struct bpil_array_desc *desc;
__u32 count, size;
if ((arrays & (1UL << i)) == 0)
continue;
desc = bpil_array_desc + i;
count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
bpf_prog_info_set_offset_u32(&info_linear->info,
desc->count_offset, count);
bpf_prog_info_set_offset_u32(&info_linear->info,
desc->size_offset, size);
bpf_prog_info_set_offset_u64(&info_linear->info,
desc->array_offset,
ptr_to_u64(ptr));
ptr += roundup(count * size, sizeof(__u64));
}
/* step 5: call syscall again to get required arrays */
err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
if (err) {
pr_debug("can't get prog info: %s", strerror(errno));
free(info_linear);
return ERR_PTR(-EFAULT);
}
/* step 6: verify the data */
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
struct bpil_array_desc *desc;
__u32 v1, v2;
if ((arrays & (1UL << i)) == 0)
continue;
desc = bpil_array_desc + i;
v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
desc->count_offset);
if (v1 != v2)
pr_warning("%s: mismatch in element count\n", __func__);
v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
desc->size_offset);
if (v1 != v2)
pr_warning("%s: mismatch in rec size\n", __func__);
}
/* step 7: update info_len and data_len */
info_linear->info_len = sizeof(struct bpf_prog_info);
info_linear->data_len = data_len;
return info_linear;
}
void bpil_addr_to_offs(struct perf_bpil *info_linear)
{
int i;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
struct bpil_array_desc *desc;
__u64 addr, offs;
if ((info_linear->arrays & (1UL << i)) == 0)
continue;
desc = bpil_array_desc + i;
addr = bpf_prog_info_read_offset_u64(&info_linear->info,
desc->array_offset);
offs = addr - ptr_to_u64(info_linear->data);
bpf_prog_info_set_offset_u64(&info_linear->info,
desc->array_offset, offs);
}
}
void bpil_offs_to_addr(struct perf_bpil *info_linear)
{
int i;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
struct bpil_array_desc *desc;
__u64 addr, offs;
if ((info_linear->arrays & (1UL << i)) == 0)
continue;
desc = bpil_array_desc + i;
offs = bpf_prog_info_read_offset_u64(&info_linear->info,
desc->array_offset);
addr = offs + ptr_to_u64(info_linear->data);
bpf_prog_info_set_offset_u64(&info_linear->info,
desc->array_offset, addr);
}
}
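/*
 * Illustrative usage sketch (not part of the original file, hence kept
 * under #if 0): how a caller might combine the helpers above to walk the
 * jited ksyms of a program. 'prog_fd' is assumed to be a valid BPF
 * program file descriptor.
 */
#if 0
static void example_dump_jited_ksyms(int prog_fd)
{
	struct perf_bpil *il;
	__u64 *ksyms;
	__u32 i;

	il = get_bpf_prog_info_linear(prog_fd, 1UL << PERF_BPIL_JITED_KSYMS);
	if (IS_ERR_OR_NULL(il))
		return;

	/* after step 5 the array fields point into il->data */
	ksyms = (__u64 *)(unsigned long)il->info.jited_ksyms;
	for (i = 0; i < il->info.nr_jited_ksyms; i++)
		pr_debug("ksym %u: %#llx\n", i, (unsigned long long)ksyms[i]);

	free(il);
}
#endif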
| linux-master | tools/perf/util/bpf-utils.c |
// SPDX-License-Identifier: GPL-2.0
#include "addr_location.h"
#include "map.h"
#include "maps.h"
#include "thread.h"
void addr_location__init(struct addr_location *al)
{
al->thread = NULL;
al->maps = NULL;
al->map = NULL;
al->sym = NULL;
al->srcline = NULL;
al->addr = 0;
al->level = 0;
al->filtered = 0;
al->cpumode = 0;
al->cpu = 0;
al->socket = 0;
}
/*
 * The preprocess_sample method will return with reference counts for the
 * entries in it; when done using it (and perhaps after getting extra ref
 * counts, if a pointer to one of those entries needs to be kept) it must
 * be paired with addr_location__exit(), so that the refcounts can be
 * decremented.
 */
void addr_location__exit(struct addr_location *al)
{
map__zput(al->map);
thread__zput(al->thread);
maps__zput(al->maps);
}
void addr_location__copy(struct addr_location *dst, struct addr_location *src)
{
thread__put(dst->thread);
maps__put(dst->maps);
map__put(dst->map);
*dst = *src;
dst->thread = thread__get(src->thread);
dst->maps = maps__get(src->maps);
dst->map = map__get(src->map);
}
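/*
 * Illustrative lifetime sketch (not part of the original file): every
 * addr_location__init() must be balanced by addr_location__exit() once
 * the resolved entries are no longer needed.
 */
#if 0
static void example_lifetime(void)
{
	struct addr_location al;

	addr_location__init(&al);
	/* ... fill 'al' via a resolver such as machine__resolve() ... */
	addr_location__exit(&al);	/* drops the map/thread/maps references */
}
#endif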
| linux-master | tools/perf/util/addr_location.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Helper functions for handling target threads/cpus
*
* Copyright (C) 2012, LG Electronics, Namhyung Kim <[email protected]>
*/
#include "target.h"
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/string.h>
enum target_errno target__validate(struct target *target)
{
enum target_errno ret = TARGET_ERRNO__SUCCESS;
if (target->pid)
target->tid = target->pid;
/* CPU and PID are mutually exclusive */
if (target->tid && target->cpu_list) {
target->cpu_list = NULL;
if (ret == TARGET_ERRNO__SUCCESS)
ret = TARGET_ERRNO__PID_OVERRIDE_CPU;
}
/* UID and PID are mutually exclusive */
if (target->tid && target->uid_str) {
target->uid_str = NULL;
if (ret == TARGET_ERRNO__SUCCESS)
ret = TARGET_ERRNO__PID_OVERRIDE_UID;
}
/* UID and CPU are mutually exclusive */
if (target->uid_str && target->cpu_list) {
target->cpu_list = NULL;
if (ret == TARGET_ERRNO__SUCCESS)
ret = TARGET_ERRNO__UID_OVERRIDE_CPU;
}
/* PID and SYSTEM are mutually exclusive */
if (target->tid && target->system_wide) {
target->system_wide = false;
if (ret == TARGET_ERRNO__SUCCESS)
ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM;
}
/* UID and SYSTEM are mutually exclusive */
if (target->uid_str && target->system_wide) {
target->system_wide = false;
if (ret == TARGET_ERRNO__SUCCESS)
ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
}
/* BPF and CPU are mutually exclusive */
if (target->bpf_str && target->cpu_list) {
target->cpu_list = NULL;
if (ret == TARGET_ERRNO__SUCCESS)
ret = TARGET_ERRNO__BPF_OVERRIDE_CPU;
}
/* BPF and PID/TID are mutually exclusive */
if (target->bpf_str && target->tid) {
target->tid = NULL;
if (ret == TARGET_ERRNO__SUCCESS)
ret = TARGET_ERRNO__BPF_OVERRIDE_PID;
}
/* BPF and UID are mutually exclusive */
if (target->bpf_str && target->uid_str) {
target->uid_str = NULL;
if (ret == TARGET_ERRNO__SUCCESS)
ret = TARGET_ERRNO__BPF_OVERRIDE_UID;
}
/* BPF and THREADS are mutually exclusive */
if (target->bpf_str && target->per_thread) {
target->per_thread = false;
if (ret == TARGET_ERRNO__SUCCESS)
ret = TARGET_ERRNO__BPF_OVERRIDE_THREAD;
}
/* THREAD and SYSTEM/CPU are mutually exclusive */
if (target->per_thread && (target->system_wide || target->cpu_list)) {
target->per_thread = false;
if (ret == TARGET_ERRNO__SUCCESS)
ret = TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD;
}
return ret;
}
enum target_errno target__parse_uid(struct target *target)
{
struct passwd pwd, *result;
char buf[1024];
const char *str = target->uid_str;
target->uid = UINT_MAX;
if (str == NULL)
return TARGET_ERRNO__SUCCESS;
/* Try user name first */
getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
if (result == NULL) {
		/*
		 * The user name was not found. Maybe it's a UID number.
		 */
char *endptr;
int uid = strtol(str, &endptr, 10);
if (*endptr != '\0')
return TARGET_ERRNO__INVALID_UID;
getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
if (result == NULL)
return TARGET_ERRNO__USER_NOT_FOUND;
}
target->uid = result->pw_uid;
return TARGET_ERRNO__SUCCESS;
}
/*
* This must have a same ordering as the enum target_errno.
*/
static const char *target__error_str[] = {
"PID/TID switch overriding CPU",
"PID/TID switch overriding UID",
"UID switch overriding CPU",
"PID/TID switch overriding SYSTEM",
"UID switch overriding SYSTEM",
"SYSTEM/CPU switch overriding PER-THREAD",
"BPF switch overriding CPU",
"BPF switch overriding PID/TID",
"BPF switch overriding UID",
"BPF switch overriding THREAD",
"Invalid User: %s",
"Problems obtaining information for user %s",
};
int target__strerror(struct target *target, int errnum,
char *buf, size_t buflen)
{
int idx;
const char *msg;
BUG_ON(buflen == 0);
if (errnum >= 0) {
str_error_r(errnum, buf, buflen);
return 0;
}
if (errnum < __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END)
return -1;
idx = errnum - __TARGET_ERRNO__START;
msg = target__error_str[idx];
switch (errnum) {
case TARGET_ERRNO__PID_OVERRIDE_CPU ...
TARGET_ERRNO__BPF_OVERRIDE_THREAD:
snprintf(buf, buflen, "%s", msg);
break;
case TARGET_ERRNO__INVALID_UID:
case TARGET_ERRNO__USER_NOT_FOUND:
snprintf(buf, buflen, msg, target->uid_str);
break;
default:
/* cannot reach here */
break;
}
return 0;
}
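/*
 * Illustrative usage sketch (not part of the original file): validating a
 * target and reporting any option override through target__strerror().
 */
#if 0
static void example_validate(struct target *target)
{
	enum target_errno err = target__validate(target);

	if (err != TARGET_ERRNO__SUCCESS) {
		char buf[BUFSIZ];

		target__strerror(target, err, buf, sizeof(buf));
		fprintf(stderr, "%s\n", buf);
	}
}
#endif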
| linux-master | tools/perf/util/target.c |
// SPDX-License-Identifier: GPL-2.0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <zlib.h>
#include <linux/compiler.h>
#include <internal/lib.h>
#include "util/compress.h"
#define CHUNK_SIZE 16384
int gzip_decompress_to_file(const char *input, int output_fd)
{
int ret = Z_STREAM_ERROR;
int input_fd;
void *ptr;
int len;
struct stat stbuf;
unsigned char buf[CHUNK_SIZE];
z_stream zs = {
.zalloc = Z_NULL,
.zfree = Z_NULL,
.opaque = Z_NULL,
.avail_in = 0,
.next_in = Z_NULL,
};
input_fd = open(input, O_RDONLY);
if (input_fd < 0)
return -1;
if (fstat(input_fd, &stbuf) < 0)
goto out_close;
ptr = mmap(NULL, stbuf.st_size, PROT_READ, MAP_PRIVATE, input_fd, 0);
if (ptr == MAP_FAILED)
goto out_close;
if (inflateInit2(&zs, 16 + MAX_WBITS) != Z_OK)
goto out_unmap;
zs.next_in = ptr;
zs.avail_in = stbuf.st_size;
do {
zs.next_out = buf;
zs.avail_out = CHUNK_SIZE;
ret = inflate(&zs, Z_NO_FLUSH);
switch (ret) {
case Z_NEED_DICT:
ret = Z_DATA_ERROR;
/* fall through */
case Z_DATA_ERROR:
case Z_MEM_ERROR:
goto out;
default:
break;
}
len = CHUNK_SIZE - zs.avail_out;
if (writen(output_fd, buf, len) != len) {
ret = Z_DATA_ERROR;
goto out;
}
} while (ret != Z_STREAM_END);
out:
inflateEnd(&zs);
out_unmap:
munmap(ptr, stbuf.st_size);
out_close:
close(input_fd);
return ret == Z_STREAM_END ? 0 : -1;
}
bool gzip_is_compressed(const char *input)
{
int fd = open(input, O_RDONLY);
const uint8_t magic[2] = { 0x1f, 0x8b };
char buf[2] = { 0 };
ssize_t rc;
	if (fd < 0)
		return false;
rc = read(fd, buf, sizeof(buf));
close(fd);
return rc == sizeof(buf) ?
memcmp(buf, magic, sizeof(buf)) == 0 : false;
}
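/*
 * Illustrative usage sketch (not part of the original file): probe for the
 * gzip magic before paying for a full inflate pass.
 */
#if 0
static int example_maybe_decompress(const char *path, int output_fd)
{
	if (!gzip_is_compressed(path))
		return -1;

	return gzip_decompress_to_file(path, output_fd);
}
#endif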
| linux-master | tools/perf/util/zlib.c |
// SPDX-License-Identifier: GPL-2.0
#include "util/iostat.h"
#include "util/debug.h"
enum iostat_mode_t iostat_mode = IOSTAT_NONE;
__weak int iostat_prepare(struct evlist *evlist __maybe_unused,
struct perf_stat_config *config __maybe_unused)
{
return -1;
}
__weak int iostat_parse(const struct option *opt __maybe_unused,
const char *str __maybe_unused,
int unset __maybe_unused)
{
pr_err("iostat mode is not supported on current platform\n");
return -1;
}
__weak void iostat_list(struct evlist *evlist __maybe_unused,
struct perf_stat_config *config __maybe_unused)
{
}
__weak void iostat_release(struct evlist *evlist __maybe_unused)
{
}
__weak void iostat_print_header_prefix(struct perf_stat_config *config __maybe_unused)
{
}
__weak void iostat_print_metric(struct perf_stat_config *config __maybe_unused,
struct evsel *evsel __maybe_unused,
struct perf_stat_output_ctx *out __maybe_unused)
{
}
__weak void iostat_prefix(struct evlist *evlist __maybe_unused,
struct perf_stat_config *config __maybe_unused,
char *prefix __maybe_unused,
struct timespec *ts __maybe_unused)
{
}
__weak void iostat_print_counters(struct evlist *evlist __maybe_unused,
struct perf_stat_config *config __maybe_unused,
struct timespec *ts __maybe_unused,
char *prefix __maybe_unused,
iostat_print_counter_t print_cnt_cb __maybe_unused,
void *arg __maybe_unused)
{
}
| linux-master | tools/perf/util/iostat.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* CTF writing support via babeltrace.
*
* Copyright (C) 2014, Jiri Olsa <[email protected]>
* Copyright (C) 2014, Sebastian Andrzej Siewior <[email protected]>
*/
#include <errno.h>
#include <inttypes.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-writer/clock.h>
#include <babeltrace/ctf-writer/stream.h>
#include <babeltrace/ctf-writer/event.h>
#include <babeltrace/ctf-writer/event-types.h>
#include <babeltrace/ctf-writer/event-fields.h>
#include <babeltrace/ctf-ir/utils.h>
#include <babeltrace/ctf/events.h>
#include "asm/bug.h"
#include "data-convert.h"
#include "session.h"
#include "debug.h"
#include "tool.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "config.h"
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/time64.h>
#include "util.h"
#include "clockid.h"
#include "util/sample.h"
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#define pr_N(n, fmt, ...) \
eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
struct evsel_priv {
struct bt_ctf_event_class *event_class;
};
#define MAX_CPUS 4096
struct ctf_stream {
struct bt_ctf_stream *stream;
int cpu;
u32 count;
};
struct ctf_writer {
/* writer primitives */
struct bt_ctf_writer *writer;
struct ctf_stream **stream;
int stream_cnt;
struct bt_ctf_stream_class *stream_class;
struct bt_ctf_clock *clock;
/* data types */
union {
struct {
struct bt_ctf_field_type *s64;
struct bt_ctf_field_type *u64;
struct bt_ctf_field_type *s32;
struct bt_ctf_field_type *u32;
struct bt_ctf_field_type *string;
struct bt_ctf_field_type *u32_hex;
struct bt_ctf_field_type *u64_hex;
};
struct bt_ctf_field_type *array[6];
} data;
struct bt_ctf_event_class *comm_class;
struct bt_ctf_event_class *exit_class;
struct bt_ctf_event_class *fork_class;
struct bt_ctf_event_class *mmap_class;
struct bt_ctf_event_class *mmap2_class;
};
struct convert {
struct perf_tool tool;
struct ctf_writer writer;
u64 events_size;
u64 events_count;
u64 non_sample_count;
/* Ordered events configured queue size. */
u64 queue_size;
};
static int value_set(struct bt_ctf_field_type *type,
struct bt_ctf_event *event,
const char *name, u64 val)
{
struct bt_ctf_field *field;
bool sign = bt_ctf_field_type_integer_get_signed(type);
int ret;
field = bt_ctf_field_create(type);
if (!field) {
pr_err("failed to create a field %s\n", name);
return -1;
}
if (sign) {
ret = bt_ctf_field_signed_integer_set_value(field, val);
if (ret) {
pr_err("failed to set field value %s\n", name);
goto err;
}
} else {
ret = bt_ctf_field_unsigned_integer_set_value(field, val);
if (ret) {
pr_err("failed to set field value %s\n", name);
goto err;
}
}
ret = bt_ctf_event_set_payload(event, name, field);
if (ret) {
pr_err("failed to set payload %s\n", name);
goto err;
}
pr2(" SET [%s = %" PRIu64 "]\n", name, val);
err:
bt_ctf_field_put(field);
return ret;
}
#define __FUNC_VALUE_SET(_name, _val_type) \
static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \
struct bt_ctf_event *event, \
const char *name, \
_val_type val) \
{ \
struct bt_ctf_field_type *type = cw->data._name; \
return value_set(type, event, name, (u64) val); \
}
#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)
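/*
 * Illustrative expansion (not part of the original file): FUNC_VALUE_SET(u32)
 * defines value_set_u32(cw, event, name, val), which forwards to value_set()
 * with the 32-bit unsigned field type stored in cw->data; the u64_hex
 * variant reuses the u64 C type but formats the CTF field in hexadecimal.
 */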
static int string_set_value(struct bt_ctf_field *field, const char *string);
static __maybe_unused int
value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
const char *name, const char *string)
{
struct bt_ctf_field_type *type = cw->data.string;
struct bt_ctf_field *field;
int ret = 0;
field = bt_ctf_field_create(type);
if (!field) {
pr_err("failed to create a field %s\n", name);
return -1;
}
ret = string_set_value(field, string);
if (ret) {
pr_err("failed to set value %s\n", name);
goto err_put_field;
}
ret = bt_ctf_event_set_payload(event, name, field);
if (ret)
pr_err("failed to set payload %s\n", name);
err_put_field:
bt_ctf_field_put(field);
return ret;
}
static struct bt_ctf_field_type*
get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
{
unsigned long flags = field->flags;
if (flags & TEP_FIELD_IS_STRING)
return cw->data.string;
if (!(flags & TEP_FIELD_IS_SIGNED)) {
/* unsigned long are mostly pointers */
if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
return cw->data.u64_hex;
}
if (flags & TEP_FIELD_IS_SIGNED) {
if (field->size == 8)
return cw->data.s64;
else
return cw->data.s32;
}
if (field->size == 8)
return cw->data.u64;
else
return cw->data.u32;
}
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
unsigned long long value_mask;
/*
* value_mask = (1 << (size * 8 - 1)) - 1.
* Directly set value_mask for code readers.
*/
switch (size) {
case 1:
value_mask = 0x7fULL;
break;
case 2:
value_mask = 0x7fffULL;
break;
case 4:
value_mask = 0x7fffffffULL;
break;
case 8:
/*
* For 64 bit value, return it self. There is no need
* to fill high bit.
*/
/* Fall through */
default:
/* BUG! */
return value_int;
}
/* If it is a positive value, don't adjust. */
if ((value_int & (~0ULL - value_mask)) == 0)
return value_int;
/* Fill upper part of value_int with 1 to make it a negative long long. */
return (value_int & value_mask) | ~value_mask;
}
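/*
 * Worked example (illustrative, not part of the original file):
 * adjust_signedness(0xff, 1) sign-extends the one-byte value -1 to
 * 0xffffffffffffffffULL, while adjust_signedness(0x7f, 1) is returned
 * unchanged because its sign bit is clear.
 */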
static int string_set_value(struct bt_ctf_field *field, const char *string)
{
char *buffer = NULL;
size_t len = strlen(string), i, p;
int err;
for (i = p = 0; i < len; i++, p++) {
if (isprint(string[i])) {
if (!buffer)
continue;
buffer[p] = string[i];
} else {
char numstr[5];
snprintf(numstr, sizeof(numstr), "\\x%02x",
(unsigned int)(string[i]) & 0xff);
if (!buffer) {
buffer = zalloc(i + (len - i) * 4 + 2);
if (!buffer) {
pr_err("failed to set unprintable string '%s'\n", string);
return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
}
if (i > 0)
strncpy(buffer, string, i);
}
memcpy(buffer + p, numstr, 4);
p += 3;
}
}
if (!buffer)
return bt_ctf_field_string_set_value(field, string);
err = bt_ctf_field_string_set_value(field, buffer);
free(buffer);
return err;
}
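/*
 * Illustrative example (not part of the original file): a string such as
 * "ab\x01c" is rewritten as "ab\\x01c" before being handed to babeltrace,
 * i.e. each unprintable byte expands to a four-character "\xNN" escape
 * while printable runs are copied through unchanged.
 */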
static int add_tracepoint_field_value(struct ctf_writer *cw,
struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
struct perf_sample *sample,
struct tep_format_field *fmtf)
{
struct bt_ctf_field_type *type;
struct bt_ctf_field *array_field;
struct bt_ctf_field *field;
const char *name = fmtf->name;
void *data = sample->raw_data;
unsigned long flags = fmtf->flags;
unsigned int n_items;
unsigned int i;
unsigned int offset;
unsigned int len;
int ret;
name = fmtf->alias;
offset = fmtf->offset;
len = fmtf->size;
if (flags & TEP_FIELD_IS_STRING)
flags &= ~TEP_FIELD_IS_ARRAY;
if (flags & TEP_FIELD_IS_DYNAMIC) {
unsigned long long tmp_val;
tmp_val = tep_read_number(fmtf->event->tep,
data + offset, len);
offset = tmp_val;
len = offset >> 16;
offset &= 0xffff;
if (tep_field_is_relative(flags))
offset += fmtf->offset + fmtf->size;
}
if (flags & TEP_FIELD_IS_ARRAY) {
type = bt_ctf_event_class_get_field_by_name(
event_class, name);
array_field = bt_ctf_field_create(type);
bt_ctf_field_type_put(type);
if (!array_field) {
pr_err("Failed to create array type %s\n", name);
return -1;
}
len = fmtf->size / fmtf->arraylen;
n_items = fmtf->arraylen;
} else {
n_items = 1;
array_field = NULL;
}
type = get_tracepoint_field_type(cw, fmtf);
for (i = 0; i < n_items; i++) {
if (flags & TEP_FIELD_IS_ARRAY)
field = bt_ctf_field_array_get_field(array_field, i);
else
field = bt_ctf_field_create(type);
if (!field) {
pr_err("failed to create a field %s\n", name);
return -1;
}
if (flags & TEP_FIELD_IS_STRING)
ret = string_set_value(field, data + offset + i * len);
else {
unsigned long long value_int;
value_int = tep_read_number(
fmtf->event->tep,
data + offset + i * len, len);
if (!(flags & TEP_FIELD_IS_SIGNED))
ret = bt_ctf_field_unsigned_integer_set_value(
field, value_int);
else
ret = bt_ctf_field_signed_integer_set_value(
field, adjust_signedness(value_int, len));
}
if (ret) {
pr_err("failed to set file value %s\n", name);
goto err_put_field;
}
if (!(flags & TEP_FIELD_IS_ARRAY)) {
ret = bt_ctf_event_set_payload(event, name, field);
if (ret) {
pr_err("failed to set payload %s\n", name);
goto err_put_field;
}
}
bt_ctf_field_put(field);
}
if (flags & TEP_FIELD_IS_ARRAY) {
ret = bt_ctf_event_set_payload(event, name, array_field);
if (ret) {
pr_err("Failed add payload array %s\n", name);
return -1;
}
bt_ctf_field_put(array_field);
}
return 0;
err_put_field:
bt_ctf_field_put(field);
return -1;
}
static int add_tracepoint_fields_values(struct ctf_writer *cw,
struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
struct tep_format_field *fields,
struct perf_sample *sample)
{
struct tep_format_field *field;
int ret;
for (field = fields; field; field = field->next) {
ret = add_tracepoint_field_value(cw, event_class, event, sample,
field);
if (ret)
return -1;
}
return 0;
}
static int add_tracepoint_values(struct ctf_writer *cw,
struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
struct evsel *evsel,
struct perf_sample *sample)
{
struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
struct tep_format_field *fields = evsel->tp_format->format.fields;
int ret;
ret = add_tracepoint_fields_values(cw, event_class, event,
common_fields, sample);
if (!ret)
ret = add_tracepoint_fields_values(cw, event_class, event,
fields, sample);
return ret;
}
static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
struct perf_sample *sample)
{
struct bt_ctf_field_type *len_type, *seq_type;
struct bt_ctf_field *len_field, *seq_field;
unsigned int raw_size = sample->raw_size;
unsigned int nr_elements = raw_size / sizeof(u32);
unsigned int i;
int ret;
if (nr_elements * sizeof(u32) != raw_size)
pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
raw_size, nr_elements * sizeof(u32) - raw_size);
len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
len_field = bt_ctf_field_create(len_type);
if (!len_field) {
pr_err("failed to create 'raw_len' for bpf output event\n");
ret = -1;
goto put_len_type;
}
ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
if (ret) {
pr_err("failed to set field value for raw_len\n");
goto put_len_field;
}
ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
if (ret) {
pr_err("failed to set payload to raw_len\n");
goto put_len_field;
}
seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
seq_field = bt_ctf_field_create(seq_type);
if (!seq_field) {
pr_err("failed to create 'raw_data' for bpf output event\n");
ret = -1;
goto put_seq_type;
}
ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
if (ret) {
pr_err("failed to set length of 'raw_data'\n");
goto put_seq_field;
}
for (i = 0; i < nr_elements; i++) {
struct bt_ctf_field *elem_field =
bt_ctf_field_sequence_get_field(seq_field, i);
ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
((u32 *)(sample->raw_data))[i]);
bt_ctf_field_put(elem_field);
if (ret) {
pr_err("failed to set raw_data[%d]\n", i);
goto put_seq_field;
}
}
ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
if (ret)
pr_err("failed to set payload for raw_data\n");
put_seq_field:
bt_ctf_field_put(seq_field);
put_seq_type:
bt_ctf_field_type_put(seq_type);
put_len_field:
bt_ctf_field_put(len_field);
put_len_type:
bt_ctf_field_type_put(len_type);
return ret;
}
static int
add_callchain_output_values(struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
struct ip_callchain *callchain)
{
struct bt_ctf_field_type *len_type, *seq_type;
struct bt_ctf_field *len_field, *seq_field;
unsigned int nr_elements = callchain->nr;
unsigned int i;
int ret;
len_type = bt_ctf_event_class_get_field_by_name(
event_class, "perf_callchain_size");
len_field = bt_ctf_field_create(len_type);
if (!len_field) {
pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
ret = -1;
goto put_len_type;
}
ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
if (ret) {
pr_err("failed to set field value for perf_callchain_size\n");
goto put_len_field;
}
ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
if (ret) {
pr_err("failed to set payload to perf_callchain_size\n");
goto put_len_field;
}
seq_type = bt_ctf_event_class_get_field_by_name(
event_class, "perf_callchain");
seq_field = bt_ctf_field_create(seq_type);
if (!seq_field) {
pr_err("failed to create 'perf_callchain' for callchain output event\n");
ret = -1;
goto put_seq_type;
}
ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
if (ret) {
pr_err("failed to set length of 'perf_callchain'\n");
goto put_seq_field;
}
for (i = 0; i < nr_elements; i++) {
struct bt_ctf_field *elem_field =
bt_ctf_field_sequence_get_field(seq_field, i);
ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
((u64 *)(callchain->ips))[i]);
bt_ctf_field_put(elem_field);
if (ret) {
pr_err("failed to set callchain[%d]\n", i);
goto put_seq_field;
}
}
ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
if (ret)
pr_err("failed to set payload for raw_data\n");
put_seq_field:
bt_ctf_field_put(seq_field);
put_seq_type:
bt_ctf_field_type_put(seq_type);
put_len_field:
bt_ctf_field_put(len_field);
put_len_type:
bt_ctf_field_type_put(len_type);
return ret;
}
static int add_generic_values(struct ctf_writer *cw,
struct bt_ctf_event *event,
struct evsel *evsel,
struct perf_sample *sample)
{
u64 type = evsel->core.attr.sample_type;
int ret;
/*
* missing:
* PERF_SAMPLE_TIME - not needed as we have it in
* ctf event header
* PERF_SAMPLE_READ - TODO
* PERF_SAMPLE_RAW - tracepoint fields are handled separately
* PERF_SAMPLE_BRANCH_STACK - TODO
* PERF_SAMPLE_REGS_USER - TODO
* PERF_SAMPLE_STACK_USER - TODO
*/
if (type & PERF_SAMPLE_IP) {
ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
if (ret)
return -1;
}
if (type & PERF_SAMPLE_TID) {
ret = value_set_s32(cw, event, "perf_tid", sample->tid);
if (ret)
return -1;
ret = value_set_s32(cw, event, "perf_pid", sample->pid);
if (ret)
return -1;
}
if ((type & PERF_SAMPLE_ID) ||
(type & PERF_SAMPLE_IDENTIFIER)) {
ret = value_set_u64(cw, event, "perf_id", sample->id);
if (ret)
return -1;
}
if (type & PERF_SAMPLE_STREAM_ID) {
ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
if (ret)
return -1;
}
if (type & PERF_SAMPLE_PERIOD) {
ret = value_set_u64(cw, event, "perf_period", sample->period);
if (ret)
return -1;
}
if (type & PERF_SAMPLE_WEIGHT) {
ret = value_set_u64(cw, event, "perf_weight", sample->weight);
if (ret)
return -1;
}
if (type & PERF_SAMPLE_DATA_SRC) {
ret = value_set_u64(cw, event, "perf_data_src",
sample->data_src);
if (ret)
return -1;
}
if (type & PERF_SAMPLE_TRANSACTION) {
ret = value_set_u64(cw, event, "perf_transaction",
sample->transaction);
if (ret)
return -1;
}
return 0;
}
static int ctf_stream__flush(struct ctf_stream *cs)
{
int err = 0;
if (cs) {
err = bt_ctf_stream_flush(cs->stream);
if (err)
pr_err("CTF stream %d flush failed\n", cs->cpu);
pr("Flush stream for cpu %d (%u samples)\n",
cs->cpu, cs->count);
cs->count = 0;
}
return err;
}
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
struct ctf_stream *cs;
struct bt_ctf_field *pkt_ctx = NULL;
struct bt_ctf_field *cpu_field = NULL;
struct bt_ctf_stream *stream = NULL;
int ret;
cs = zalloc(sizeof(*cs));
if (!cs) {
pr_err("Failed to allocate ctf stream\n");
return NULL;
}
stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
if (!stream) {
pr_err("Failed to create CTF stream\n");
goto out;
}
pkt_ctx = bt_ctf_stream_get_packet_context(stream);
if (!pkt_ctx) {
pr_err("Failed to obtain packet context\n");
goto out;
}
cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
bt_ctf_field_put(pkt_ctx);
if (!cpu_field) {
pr_err("Failed to obtain cpu field\n");
goto out;
}
ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
if (ret) {
pr_err("Failed to update CPU number\n");
goto out;
}
bt_ctf_field_put(cpu_field);
cs->cpu = cpu;
cs->stream = stream;
return cs;
out:
if (cpu_field)
bt_ctf_field_put(cpu_field);
if (stream)
bt_ctf_stream_put(stream);
free(cs);
return NULL;
}
static void ctf_stream__delete(struct ctf_stream *cs)
{
if (cs) {
bt_ctf_stream_put(cs->stream);
free(cs);
}
}
static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
struct ctf_stream *cs = cw->stream[cpu];
if (!cs) {
cs = ctf_stream__create(cw, cpu);
cw->stream[cpu] = cs;
}
return cs;
}
static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
struct evsel *evsel)
{
int cpu = 0;
if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
cpu = sample->cpu;
	if (cpu >= cw->stream_cnt) {
pr_err("Event was recorded for CPU %d, limit is at %d.\n",
cpu, cw->stream_cnt);
cpu = 0;
}
return cpu;
}
#define STREAM_FLUSH_COUNT 100000
/*
* Currently we have no other way to determine the
* time for the stream flush other than keep track
* of the number of events and check it against
* threshold.
*/
static bool is_flush_needed(struct ctf_stream *cs)
{
return cs->count >= STREAM_FLUSH_COUNT;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *_event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine __maybe_unused)
{
struct convert *c = container_of(tool, struct convert, tool);
struct evsel_priv *priv = evsel->priv;
struct ctf_writer *cw = &c->writer;
struct ctf_stream *cs;
struct bt_ctf_event_class *event_class;
struct bt_ctf_event *event;
int ret;
unsigned long type = evsel->core.attr.sample_type;
if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
return 0;
event_class = priv->event_class;
/* update stats */
c->events_count++;
c->events_size += _event->header.size;
pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
event = bt_ctf_event_create(event_class);
if (!event) {
pr_err("Failed to create an CTF event\n");
return -1;
}
bt_ctf_clock_set_time(cw->clock, sample->time);
ret = add_generic_values(cw, event, evsel, sample);
if (ret)
return -1;
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
ret = add_tracepoint_values(cw, event_class, event,
evsel, sample);
if (ret)
return -1;
}
if (type & PERF_SAMPLE_CALLCHAIN) {
ret = add_callchain_output_values(event_class,
event, sample->callchain);
if (ret)
return -1;
}
if (evsel__is_bpf_output(evsel)) {
ret = add_bpf_output_values(event_class, event, sample);
if (ret)
return -1;
}
cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
if (cs) {
if (is_flush_needed(cs))
ctf_stream__flush(cs);
cs->count++;
bt_ctf_stream_append_event(cs->stream, event);
}
bt_ctf_event_put(event);
return cs ? 0 : -1;
}
#define __NON_SAMPLE_SET_FIELD(_name, _type, _field) \
do { \
ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
if (ret) \
return -1; \
} while(0)
#define __FUNC_PROCESS_NON_SAMPLE(_name, body) \
static int process_##_name##_event(struct perf_tool *tool, \
union perf_event *_event, \
struct perf_sample *sample, \
struct machine *machine) \
{ \
struct convert *c = container_of(tool, struct convert, tool);\
struct ctf_writer *cw = &c->writer; \
struct bt_ctf_event_class *event_class = cw->_name##_class;\
struct bt_ctf_event *event; \
struct ctf_stream *cs; \
int ret; \
\
c->non_sample_count++; \
c->events_size += _event->header.size; \
event = bt_ctf_event_create(event_class); \
if (!event) { \
pr_err("Failed to create an CTF event\n"); \
return -1; \
} \
\
bt_ctf_clock_set_time(cw->clock, sample->time); \
body \
cs = ctf_stream(cw, 0); \
if (cs) { \
if (is_flush_needed(cs)) \
ctf_stream__flush(cs); \
\
cs->count++; \
bt_ctf_stream_append_event(cs->stream, event); \
} \
bt_ctf_event_put(event); \
\
return perf_event__process_##_name(tool, _event, sample, machine);\
}
__FUNC_PROCESS_NON_SAMPLE(comm,
__NON_SAMPLE_SET_FIELD(comm, u32, pid);
__NON_SAMPLE_SET_FIELD(comm, u32, tid);
__NON_SAMPLE_SET_FIELD(comm, string, comm);
)
__FUNC_PROCESS_NON_SAMPLE(fork,
__NON_SAMPLE_SET_FIELD(fork, u32, pid);
__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
__NON_SAMPLE_SET_FIELD(fork, u32, tid);
__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
__NON_SAMPLE_SET_FIELD(fork, u64, time);
)
__FUNC_PROCESS_NON_SAMPLE(exit,
__NON_SAMPLE_SET_FIELD(fork, u32, pid);
__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
__NON_SAMPLE_SET_FIELD(fork, u32, tid);
__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
__NON_SAMPLE_SET_FIELD(fork, u64, time);
)
__FUNC_PROCESS_NON_SAMPLE(mmap,
__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
__NON_SAMPLE_SET_FIELD(mmap, string, filename);
)
__FUNC_PROCESS_NON_SAMPLE(mmap2,
__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE
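/*
 * Illustrative note (not part of the original file): each invocation above
 * expands to one process_<name>_event() handler; e.g.
 * __FUNC_PROCESS_NON_SAMPLE(comm, ...) emits process_comm_event(), which
 * appends a CTF "perf_comm" event and then chains to
 * perf_event__process_comm() so the regular bookkeeping still runs.
 */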
/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
char *new_name = NULL;
size_t len;
if (!name)
name = orig_name;
if (dup >= 10)
goto out;
/*
	 * Add '_' prefix to potential keyword. According to
* Mathieu Desnoyers (https://lore.kernel.org/lkml/[email protected]),
* further CTF spec updating may require us to use '$'.
*/
if (dup < 0)
len = strlen(name) + sizeof("_");
else
len = strlen(orig_name) + sizeof("_dupl_X");
new_name = malloc(len);
if (!new_name)
goto out;
if (dup < 0)
snprintf(new_name, len, "_%s", name);
else
snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
out:
if (name != orig_name)
free(name);
return new_name;
}
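/*
 * Illustrative examples (not part of the original file):
 * change_name(NULL, "int", -1) yields "_int" (a keyword gets a prefix),
 * change_name(NULL, "pid", 2) yields "pid_dupl_2" (a duplicate gets a
 * suffix), and any dup >= 10 returns NULL so callers give up.
 */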
static int event_class_add_field(struct bt_ctf_event_class *event_class,
struct bt_ctf_field_type *type,
struct tep_format_field *field)
{
struct bt_ctf_field_type *t = NULL;
char *name;
int dup = 1;
int ret;
/* alias was already assigned */
if (field->alias != field->name)
return bt_ctf_event_class_add_field(event_class, type,
(char *)field->alias);
name = field->name;
	/* If 'name' is a keyword, add a prefix. */
if (bt_ctf_validate_identifier(name))
name = change_name(name, field->name, -1);
if (!name) {
pr_err("Failed to fix invalid identifier.");
return -1;
}
while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
bt_ctf_field_type_put(t);
name = change_name(name, field->name, dup++);
if (!name) {
pr_err("Failed to create dup name for '%s'\n", field->name);
return -1;
}
}
ret = bt_ctf_event_class_add_field(event_class, type, name);
if (!ret)
field->alias = name;
return ret;
}
static int add_tracepoint_fields_types(struct ctf_writer *cw,
struct tep_format_field *fields,
struct bt_ctf_event_class *event_class)
{
struct tep_format_field *field;
int ret;
for (field = fields; field; field = field->next) {
struct bt_ctf_field_type *type;
unsigned long flags = field->flags;
pr2(" field '%s'\n", field->name);
type = get_tracepoint_field_type(cw, field);
if (!type)
return -1;
/*
* A string is an array of chars. For this we use the string
* type and don't care that it is an array. What we don't
* support is an array of strings.
*/
if (flags & TEP_FIELD_IS_STRING)
flags &= ~TEP_FIELD_IS_ARRAY;
if (flags & TEP_FIELD_IS_ARRAY)
type = bt_ctf_field_type_array_create(type, field->arraylen);
ret = event_class_add_field(event_class, type, field);
if (flags & TEP_FIELD_IS_ARRAY)
bt_ctf_field_type_put(type);
if (ret) {
pr_err("Failed to add field '%s': %d\n",
field->name, ret);
return -1;
}
}
return 0;
}
static int add_tracepoint_types(struct ctf_writer *cw,
struct evsel *evsel,
struct bt_ctf_event_class *class)
{
struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
struct tep_format_field *fields = evsel->tp_format->format.fields;
int ret;
ret = add_tracepoint_fields_types(cw, common_fields, class);
if (!ret)
ret = add_tracepoint_fields_types(cw, fields, class);
return ret;
}
static int add_bpf_output_types(struct ctf_writer *cw,
struct bt_ctf_event_class *class)
{
struct bt_ctf_field_type *len_type = cw->data.u32;
struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
struct bt_ctf_field_type *seq_type;
int ret;
ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
if (ret)
return ret;
seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
if (!seq_type)
return -1;
return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}
static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
struct bt_ctf_event_class *event_class)
{
u64 type = evsel->core.attr.sample_type;
/*
* missing:
* PERF_SAMPLE_TIME - not needed as we have it in
* ctf event header
* PERF_SAMPLE_READ - TODO
* PERF_SAMPLE_CALLCHAIN - TODO
* PERF_SAMPLE_RAW - tracepoint fields and BPF output
* are handled separately
* PERF_SAMPLE_BRANCH_STACK - TODO
* PERF_SAMPLE_REGS_USER - TODO
* PERF_SAMPLE_STACK_USER - TODO
*/
#define ADD_FIELD(cl, t, n) \
do { \
pr2(" field '%s'\n", n); \
if (bt_ctf_event_class_add_field(cl, t, n)) { \
pr_err("Failed to add field '%s';\n", n); \
return -1; \
} \
} while (0)
if (type & PERF_SAMPLE_IP)
ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
if (type & PERF_SAMPLE_TID) {
ADD_FIELD(event_class, cw->data.s32, "perf_tid");
ADD_FIELD(event_class, cw->data.s32, "perf_pid");
}
if ((type & PERF_SAMPLE_ID) ||
(type & PERF_SAMPLE_IDENTIFIER))
ADD_FIELD(event_class, cw->data.u64, "perf_id");
if (type & PERF_SAMPLE_STREAM_ID)
ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
if (type & PERF_SAMPLE_PERIOD)
ADD_FIELD(event_class, cw->data.u64, "perf_period");
if (type & PERF_SAMPLE_WEIGHT)
ADD_FIELD(event_class, cw->data.u64, "perf_weight");
if (type & PERF_SAMPLE_DATA_SRC)
ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
if (type & PERF_SAMPLE_TRANSACTION)
ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
if (type & PERF_SAMPLE_CALLCHAIN) {
ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
ADD_FIELD(event_class,
bt_ctf_field_type_sequence_create(
cw->data.u64_hex, "perf_callchain_size"),
"perf_callchain");
}
#undef ADD_FIELD
return 0;
}
static int add_event(struct ctf_writer *cw, struct evsel *evsel)
{
struct bt_ctf_event_class *event_class;
struct evsel_priv *priv;
const char *name = evsel__name(evsel);
int ret;
pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
event_class = bt_ctf_event_class_create(name);
if (!event_class)
return -1;
ret = add_generic_types(cw, evsel, event_class);
if (ret)
goto err;
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
ret = add_tracepoint_types(cw, evsel, event_class);
if (ret)
goto err;
}
if (evsel__is_bpf_output(evsel)) {
ret = add_bpf_output_types(cw, event_class);
if (ret)
goto err;
}
ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
if (ret) {
pr("Failed to add event class into stream.\n");
goto err;
}
priv = malloc(sizeof(*priv));
if (!priv)
goto err;
priv->event_class = event_class;
evsel->priv = priv;
return 0;
err:
bt_ctf_event_class_put(event_class);
pr_err("Failed to add event '%s'.\n", name);
return -1;
}
static int setup_events(struct ctf_writer *cw, struct perf_session *session)
{
struct evlist *evlist = session->evlist;
struct evsel *evsel;
int ret;
evlist__for_each_entry(evlist, evsel) {
ret = add_event(cw, evsel);
if (ret)
return ret;
}
return 0;
}
#define __NON_SAMPLE_ADD_FIELD(t, n) \
do { \
pr2(" field '%s'\n", #n); \
if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
pr_err("Failed to add field '%s';\n", #n);\
return -1; \
} \
} while(0)
#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \
static int add_##_name##_event(struct ctf_writer *cw) \
{ \
struct bt_ctf_event_class *event_class; \
int ret; \
\
pr("Adding "#_name" event\n"); \
event_class = bt_ctf_event_class_create("perf_" #_name);\
if (!event_class) \
return -1; \
body \
\
ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
if (ret) { \
pr("Failed to add event class '"#_name"' into stream.\n");\
return ret; \
} \
\
cw->_name##_class = event_class; \
bt_ctf_event_class_put(event_class); \
return 0; \
}
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
__NON_SAMPLE_ADD_FIELD(u32, pid);
__NON_SAMPLE_ADD_FIELD(u32, tid);
__NON_SAMPLE_ADD_FIELD(string, comm);
)
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
__NON_SAMPLE_ADD_FIELD(u32, pid);
__NON_SAMPLE_ADD_FIELD(u32, ppid);
__NON_SAMPLE_ADD_FIELD(u32, tid);
__NON_SAMPLE_ADD_FIELD(u32, ptid);
__NON_SAMPLE_ADD_FIELD(u64, time);
)
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
__NON_SAMPLE_ADD_FIELD(u32, pid);
__NON_SAMPLE_ADD_FIELD(u32, ppid);
__NON_SAMPLE_ADD_FIELD(u32, tid);
__NON_SAMPLE_ADD_FIELD(u32, ptid);
__NON_SAMPLE_ADD_FIELD(u64, time);
)
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
__NON_SAMPLE_ADD_FIELD(u32, pid);
__NON_SAMPLE_ADD_FIELD(u32, tid);
__NON_SAMPLE_ADD_FIELD(u64_hex, start);
__NON_SAMPLE_ADD_FIELD(string, filename);
)
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
__NON_SAMPLE_ADD_FIELD(u32, pid);
__NON_SAMPLE_ADD_FIELD(u32, tid);
__NON_SAMPLE_ADD_FIELD(u64_hex, start);
__NON_SAMPLE_ADD_FIELD(string, filename);
)
#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
static int setup_non_sample_events(struct ctf_writer *cw,
struct perf_session *session __maybe_unused)
{
int ret;
ret = add_comm_event(cw);
if (ret)
return ret;
ret = add_exit_event(cw);
if (ret)
return ret;
ret = add_fork_event(cw);
if (ret)
return ret;
ret = add_mmap_event(cw);
if (ret)
return ret;
ret = add_mmap2_event(cw);
if (ret)
return ret;
return 0;
}
static void cleanup_events(struct perf_session *session)
{
struct evlist *evlist = session->evlist;
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
struct evsel_priv *priv;
priv = evsel->priv;
bt_ctf_event_class_put(priv->event_class);
zfree(&evsel->priv);
}
evlist__delete(evlist);
session->evlist = NULL;
}
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
struct ctf_stream **stream;
struct perf_header *ph = &session->header;
int ncpus;
/*
* Try to get the number of cpus used in the data file,
* if not present fallback to the MAX_CPUS.
*/
ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
stream = zalloc(sizeof(*stream) * ncpus);
if (!stream) {
pr_err("Failed to allocate streams.\n");
return -ENOMEM;
}
cw->stream = stream;
cw->stream_cnt = ncpus;
return 0;
}
static void free_streams(struct ctf_writer *cw)
{
int cpu;
for (cpu = 0; cpu < cw->stream_cnt; cpu++)
ctf_stream__delete(cw->stream[cpu]);
zfree(&cw->stream);
}
static int ctf_writer__setup_env(struct ctf_writer *cw,
struct perf_session *session)
{
struct perf_header *header = &session->header;
struct bt_ctf_writer *writer = cw->writer;
#define ADD(__n, __v) \
do { \
if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
return -1; \
} while (0)
ADD("host", header->env.hostname);
ADD("sysname", "Linux");
ADD("release", header->env.os_release);
ADD("version", header->env.version);
ADD("machine", header->env.arch);
ADD("domain", "kernel");
ADD("tracer_name", "perf");
#undef ADD
return 0;
}
static int ctf_writer__setup_clock(struct ctf_writer *cw,
struct perf_session *session,
bool tod)
{
struct bt_ctf_clock *clock = cw->clock;
const char *desc = "perf clock";
int64_t offset = 0;
if (tod) {
struct perf_env *env = &session->header.env;
if (!env->clock.enabled) {
pr_err("Can't provide --tod time, missing clock data. "
"Please record with -k/--clockid option.\n");
return -1;
}
desc = clockid_name(env->clock.clockid);
offset = env->clock.tod_ns - env->clock.clockid_ns;
}
#define SET(__n, __v) \
do { \
if (bt_ctf_clock_set_##__n(clock, __v)) \
return -1; \
} while (0)
SET(frequency, 1000000000);
SET(offset, offset);
SET(description, desc);
SET(precision, 10);
SET(is_absolute, 0);
#undef SET
return 0;
}
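/*
 * Illustrative note (not part of the original file): with --tod the clock
 * offset is tod_ns - clockid_ns, so a raw perf timestamp t ends up being
 * presented as t + offset, i.e. projected onto the wall-clock timeline of
 * the recording session.
 */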
static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
struct bt_ctf_field_type *type;
type = bt_ctf_field_type_integer_create(size);
if (!type)
return NULL;
if (sign &&
bt_ctf_field_type_integer_set_signed(type, 1))
goto err;
if (hex &&
bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
goto err;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif
pr2("Created type: INTEGER %d-bit %ssigned %s\n",
size, sign ? "un" : "", hex ? "hex" : "");
return type;
err:
bt_ctf_field_type_put(type);
return NULL;
}
static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
bt_ctf_field_type_put(cw->data.array[i]);
}
static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex) \
do { \
(type) = create_int_type(size, sign, hex); \
if (!(type)) \
goto err; \
} while (0)
CREATE_INT_TYPE(cw->data.s64, 64, true, false);
CREATE_INT_TYPE(cw->data.u64, 64, false, false);
CREATE_INT_TYPE(cw->data.s32, 32, true, false);
CREATE_INT_TYPE(cw->data.u32, 32, false, false);
CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
cw->data.string = bt_ctf_field_type_string_create();
if (cw->data.string)
return 0;
err:
ctf_writer__cleanup_data(cw);
pr_err("Failed to create data types.\n");
return -1;
}
static void ctf_writer__cleanup(struct ctf_writer *cw)
{
ctf_writer__cleanup_data(cw);
bt_ctf_clock_put(cw->clock);
free_streams(cw);
bt_ctf_stream_class_put(cw->stream_class);
bt_ctf_writer_put(cw->writer);
/* and NULL all the pointers */
memset(cw, 0, sizeof(*cw));
}
static int ctf_writer__init(struct ctf_writer *cw, const char *path,
struct perf_session *session, bool tod)
{
struct bt_ctf_writer *writer;
struct bt_ctf_stream_class *stream_class;
struct bt_ctf_clock *clock;
struct bt_ctf_field_type *pkt_ctx_type;
int ret;
/* CTF writer */
writer = bt_ctf_writer_create(path);
if (!writer)
goto err;
cw->writer = writer;
/* CTF clock */
clock = bt_ctf_clock_create("perf_clock");
if (!clock) {
pr("Failed to create CTF clock.\n");
goto err_cleanup;
}
cw->clock = clock;
if (ctf_writer__setup_clock(cw, session, tod)) {
pr("Failed to setup CTF clock.\n");
goto err_cleanup;
}
/* CTF stream class */
stream_class = bt_ctf_stream_class_create("perf_stream");
if (!stream_class) {
pr("Failed to create CTF stream class.\n");
goto err_cleanup;
}
cw->stream_class = stream_class;
/* CTF clock stream setup */
if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
pr("Failed to assign CTF clock to stream class.\n");
goto err_cleanup;
}
if (ctf_writer__init_data(cw))
goto err_cleanup;
/* Add cpu_id for packet context */
pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
if (!pkt_ctx_type)
goto err_cleanup;
ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
bt_ctf_field_type_put(pkt_ctx_type);
if (ret)
goto err_cleanup;
/* CTF clock writer setup */
if (bt_ctf_writer_add_clock(writer, clock)) {
pr("Failed to assign CTF clock to writer.\n");
goto err_cleanup;
}
return 0;
err_cleanup:
ctf_writer__cleanup(cw);
err:
pr_err("Failed to setup CTF writer.\n");
return -1;
}
static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
int cpu, ret = 0;
for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
ret = ctf_stream__flush(cw->stream[cpu]);
return ret;
}
static int convert__config(const char *var, const char *value, void *cb)
{
struct convert *c = cb;
if (!strcmp(var, "convert.queue-size"))
return perf_config_u64(&c->queue_size, var, value);
return 0;
}
int bt_convert__perf2ctf(const char *input, const char *path,
struct perf_data_convert_opts *opts)
{
struct perf_session *session;
struct perf_data data = {
.path = input,
.mode = PERF_DATA_MODE_READ,
.force = opts->force,
};
struct convert c = {
.tool = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.comm = perf_event__process_comm,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
.tracing_data = perf_event__process_tracing_data,
.build_id = perf_event__process_build_id,
.namespaces = perf_event__process_namespaces,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
};
struct ctf_writer *cw = &c.writer;
int err;
if (opts->all) {
c.tool.comm = process_comm_event;
c.tool.exit = process_exit_event;
c.tool.fork = process_fork_event;
c.tool.mmap = process_mmap_event;
c.tool.mmap2 = process_mmap2_event;
}
err = perf_config(convert__config, &c);
if (err)
return err;
err = -1;
/* perf.data session */
session = perf_session__new(&data, &c.tool);
if (IS_ERR(session))
return PTR_ERR(session);
/* CTF writer */
if (ctf_writer__init(cw, path, session, opts->tod))
goto free_session;
if (c.queue_size) {
ordered_events__set_alloc_size(&session->ordered_events,
c.queue_size);
}
/* CTF writer env/clock setup */
if (ctf_writer__setup_env(cw, session))
goto free_writer;
/* CTF events setup */
if (setup_events(cw, session))
goto free_writer;
if (opts->all && setup_non_sample_events(cw, session))
goto free_writer;
if (setup_streams(cw, session))
goto free_writer;
err = perf_session__process_events(session);
if (!err)
err = ctf_writer__flush_streams(cw);
else
pr_err("Error during conversion.\n");
fprintf(stderr,
"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
data.path, path);
fprintf(stderr,
"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
(double) c.events_size / 1024.0 / 1024.0,
c.events_count);
if (!c.non_sample_count)
fprintf(stderr, ") ]\n");
else
fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
cleanup_events(session);
perf_session__delete(session);
ctf_writer__cleanup(cw);
return err;
free_writer:
ctf_writer__cleanup(cw);
free_session:
perf_session__delete(session);
pr_err("Error during conversion setup.\n");
return err;
}
| linux-master | tools/perf/util/data-convert-bt.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include "util/compress.h"
#include "util/debug.h"
int zstd_init(struct zstd_data *data, int level)
{
size_t ret;
data->dstream = ZSTD_createDStream();
if (data->dstream == NULL) {
pr_err("Couldn't create decompression stream.\n");
return -1;
}
ret = ZSTD_initDStream(data->dstream);
if (ZSTD_isError(ret)) {
pr_err("Failed to initialize decompression stream: %s\n", ZSTD_getErrorName(ret));
return -1;
}
if (!level)
return 0;
data->cstream = ZSTD_createCStream();
if (data->cstream == NULL) {
pr_err("Couldn't create compression stream.\n");
return -1;
}
ret = ZSTD_initCStream(data->cstream, level);
if (ZSTD_isError(ret)) {
pr_err("Failed to initialize compression stream: %s\n", ZSTD_getErrorName(ret));
return -1;
}
return 0;
}
int zstd_fini(struct zstd_data *data)
{
if (data->dstream) {
ZSTD_freeDStream(data->dstream);
data->dstream = NULL;
}
if (data->cstream) {
ZSTD_freeCStream(data->cstream);
data->cstream = NULL;
}
return 0;
}
size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
void *src, size_t src_size, size_t max_record_size,
size_t process_header(void *record, size_t increment))
{
size_t ret, size, compressed = 0;
ZSTD_inBuffer input = { src, src_size, 0 };
ZSTD_outBuffer output;
void *record;
while (input.pos < input.size) {
record = dst;
size = process_header(record, 0);
compressed += size;
dst += size;
dst_size -= size;
output = (ZSTD_outBuffer){ dst, (dst_size > max_record_size) ?
max_record_size : dst_size, 0 };
ret = ZSTD_compressStream(data->cstream, &output, &input);
ZSTD_flushStream(data->cstream, &output);
if (ZSTD_isError(ret)) {
pr_err("failed to compress %ld bytes: %s\n",
(long)src_size, ZSTD_getErrorName(ret));
memcpy(dst, src, src_size);
return src_size;
}
size = output.pos;
size = process_header(record, size);
compressed += size;
dst += size;
dst_size -= size;
}
return compressed;
}
size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size,
void *dst, size_t dst_size)
{
size_t ret;
ZSTD_inBuffer input = { src, src_size, 0 };
ZSTD_outBuffer output = { dst, dst_size, 0 };
while (input.pos < input.size) {
ret = ZSTD_decompressStream(data->dstream, &output, &input);
if (ZSTD_isError(ret)) {
pr_err("failed to decompress (B): %zd -> %zd, dst_size %zd : %s\n",
src_size, output.size, dst_size, ZSTD_getErrorName(ret));
break;
}
output.dst = dst + output.pos;
output.size = dst_size - output.pos;
}
return output.pos;
}
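/*
 * Illustrative usage sketch (not part of the original file): a
 * decompression-only round through the helpers above; level 0 skips the
 * compression stream setup entirely.
 */
#if 0
static size_t example_decompress(void *comp, size_t comp_size,
				 void *out, size_t out_size)
{
	struct zstd_data data = { .dstream = NULL, .cstream = NULL };
	size_t n = 0;

	if (zstd_init(&data, 0) == 0) {
		n = zstd_decompress_stream(&data, comp, comp_size, out, out_size);
		zstd_fini(&data);
	}
	return n;
}
#endif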
| linux-master | tools/perf/util/zstd.c |
// SPDX-License-Identifier: GPL-2.0
#include <Python.h>
#include <structmember.h>
#include <inttypes.h>
#include <poll.h>
#include <linux/err.h>
#include <perf/cpumap.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
#include "stat.h"
#include "metricgroup.h"
#include "util/bpf-filter.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include <internal/lib.h>
#include "util.h"
#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
PyString_FromString(arg)
#define _PyUnicode_AsString(arg) \
PyString_AsString(arg)
#define _PyUnicode_FromFormat(...) \
PyString_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
PyInt_FromLong(arg)
#else
#define _PyUnicode_FromString(arg) \
PyUnicode_FromString(arg)
#define _PyUnicode_FromFormat(...) \
PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
PyLong_FromLong(arg)
#endif
#ifndef Py_TYPE
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif
/*
* Avoid bringing in event parsing.
*/
int parse_event(struct evlist *evlist __maybe_unused, const char *str __maybe_unused)
{
return 0;
}
/*
* Provide these two so that we don't have to link against callchain.c and
* start dragging hist.c, etc.
*/
struct callchain_param callchain_param;
int parse_callchain_record(const char *arg __maybe_unused,
struct callchain_param *param __maybe_unused)
{
return 0;
}
/*
* Add these not to drag util/env.c
*/
struct perf_env perf_env;
const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
{
return NULL;
}
// This one is a bit easier, wouldn't drag too much, but leave it as a stub since we need it here
const char *perf_env__arch(struct perf_env *env __maybe_unused)
{
return NULL;
}
/*
 * These ones are needed not to drag in the PMU bandwagon, the jevents
 * generated pmu_sys_event_tables, etc. evsel__find_pmu() is used so far
 * just for doing per-PMU perf_event_attr.exclude_guest handling, which is
 * not really needed, so far, for the known perf python binding use cases;
 * revisit if this becomes necessary.
*/
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
{
return NULL;
}
int perf_pmu__scan_file(struct perf_pmu *pmu __maybe_unused, const char *name __maybe_unused,
			const char *fmt __maybe_unused, ...)
{
return EOF;
}
int perf_pmus__num_core_pmus(void)
{
return 1;
}
bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
{
return false;
}
bool perf_pmus__supports_extended_type(void)
{
return false;
}
/*
* Add this one here not to drag util/metricgroup.c
*/
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
struct rblist *new_metric_events,
struct rblist *old_metric_events)
{
return 0;
}
/*
* Add this one here not to drag util/trace-event-info.c
*/
char *tracepoint_id_to_name(u64 config __maybe_unused)
{
return NULL;
}
/*
* XXX: All these evsel destructors need some better mechanism, like a linked
* list of destructors registered when the relevant code indeed is used instead
* of having more and more calls in perf_evsel__delete(). -- acme
*
* For now, add some more:
*
* Not to drag the BPF bandwagon...
*/
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
int bpf_counter__disable(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}
int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
{
return 0;
}
int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
return 0;
}
// not to drag util/bpf-filter.c
#ifdef HAVE_BPF_SKEL
int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
{
return 0;
}
int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused)
{
return 0;
}
#endif
/*
* Support debug printing even though util/debug.c is not linked. That means
* implementing 'verbose' and 'eprintf'.
*/
int verbose;
int debug_peo_args;
int eprintf(int level, int var, const char *fmt, ...);
int eprintf(int level, int var, const char *fmt, ...)
{
va_list args;
int ret = 0;
if (var >= level) {
va_start(args, fmt);
ret = vfprintf(stderr, fmt, args);
va_end(args);
}
return ret;
}
/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif
#define member_def(type, member, ptype, help) \
{ #member, ptype, \
offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
0, help }
#define sample_member_def(name, member, ptype, help) \
{ #name, ptype, \
offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
0, help }
struct pyrf_event {
PyObject_HEAD
struct evsel *evsel;
struct perf_sample sample;
union perf_event event;
};
#define sample_members \
sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \
sample_member_def(sample_pid, pid, T_INT, "event pid"), \
sample_member_def(sample_tid, tid, T_INT, "event tid"), \
sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
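/*
 * member_def()/sample_member_def() compute the byte offset of a member
 * inside struct pyrf_event so that PyMemberDef can expose the raw event
 * and the parsed sample fields as Python attributes. The sample_members
 * set is included by every event type below, so (a sketch, assuming
 * sample_id_all was requested when the evsel was set up):
 *
 *   event = evlist.read_on_cpu(cpu)
 *   print(event.sample_cpu, event.sample_pid, event.sample_tid)
 */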
static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
static PyMemberDef pyrf_mmap_event__members[] = {
sample_members
member_def(perf_event_header, type, T_UINT, "event type"),
member_def(perf_event_header, misc, T_UINT, "event misc"),
member_def(perf_record_mmap, pid, T_UINT, "event pid"),
member_def(perf_record_mmap, tid, T_UINT, "event tid"),
member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
{ .name = NULL, },
};
static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
{
PyObject *ret;
char *s;
if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
"length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
"filename: %s }",
pevent->event.mmap.pid, pevent->event.mmap.tid,
pevent->event.mmap.start, pevent->event.mmap.len,
pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
ret = PyErr_NoMemory();
} else {
ret = _PyUnicode_FromString(s);
free(s);
}
return ret;
}
static PyTypeObject pyrf_mmap_event__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.mmap_event",
.tp_basicsize = sizeof(struct pyrf_event),
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_mmap_event__doc,
.tp_members = pyrf_mmap_event__members,
.tp_repr = (reprfunc)pyrf_mmap_event__repr,
};
static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
static PyMemberDef pyrf_task_event__members[] = {
sample_members
member_def(perf_event_header, type, T_UINT, "event type"),
member_def(perf_record_fork, pid, T_UINT, "event pid"),
member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
member_def(perf_record_fork, tid, T_UINT, "event tid"),
member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
{ .name = NULL, },
};
static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
"ptid: %u, time: %" PRI_lu64 "}",
pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
pevent->event.fork.pid,
pevent->event.fork.ppid,
pevent->event.fork.tid,
pevent->event.fork.ptid,
pevent->event.fork.time);
}
static PyTypeObject pyrf_task_event__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.task_event",
.tp_basicsize = sizeof(struct pyrf_event),
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_task_event__doc,
.tp_members = pyrf_task_event__members,
.tp_repr = (reprfunc)pyrf_task_event__repr,
};
static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
static PyMemberDef pyrf_comm_event__members[] = {
sample_members
member_def(perf_event_header, type, T_UINT, "event type"),
member_def(perf_record_comm, pid, T_UINT, "event pid"),
member_def(perf_record_comm, tid, T_UINT, "event tid"),
member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
{ .name = NULL, },
};
static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
pevent->event.comm.pid,
pevent->event.comm.tid,
pevent->event.comm.comm);
}
static PyTypeObject pyrf_comm_event__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.comm_event",
.tp_basicsize = sizeof(struct pyrf_event),
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_comm_event__doc,
.tp_members = pyrf_comm_event__members,
.tp_repr = (reprfunc)pyrf_comm_event__repr,
};
static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
static PyMemberDef pyrf_throttle_event__members[] = {
sample_members
member_def(perf_event_header, type, T_UINT, "event type"),
member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
{ .name = NULL, },
};
static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
{
struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);
return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
", stream_id: %" PRI_lu64 " }",
pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
te->time, te->id, te->stream_id);
}
static PyTypeObject pyrf_throttle_event__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.throttle_event",
.tp_basicsize = sizeof(struct pyrf_event),
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_throttle_event__doc,
.tp_members = pyrf_throttle_event__members,
.tp_repr = (reprfunc)pyrf_throttle_event__repr,
};
static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
static PyMemberDef pyrf_lost_event__members[] = {
sample_members
member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
{ .name = NULL, },
};
static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
{
PyObject *ret;
char *s;
if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
"lost: %#" PRI_lx64 " }",
pevent->event.lost.id, pevent->event.lost.lost) < 0) {
ret = PyErr_NoMemory();
} else {
ret = _PyUnicode_FromString(s);
free(s);
}
return ret;
}
static PyTypeObject pyrf_lost_event__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.lost_event",
.tp_basicsize = sizeof(struct pyrf_event),
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_lost_event__doc,
.tp_members = pyrf_lost_event__members,
.tp_repr = (reprfunc)pyrf_lost_event__repr,
};
static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
static PyMemberDef pyrf_read_event__members[] = {
sample_members
member_def(perf_record_read, pid, T_UINT, "event pid"),
member_def(perf_record_read, tid, T_UINT, "event tid"),
{ .name = NULL, },
};
static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
pevent->event.read.pid,
pevent->event.read.tid);
/*
* FIXME: return the array of read values,
* making this method useful ;-)
*/
}
static PyTypeObject pyrf_read_event__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.read_event",
.tp_basicsize = sizeof(struct pyrf_event),
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_read_event__doc,
.tp_members = pyrf_read_event__members,
.tp_repr = (reprfunc)pyrf_read_event__repr,
};
static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
static PyMemberDef pyrf_sample_event__members[] = {
sample_members
member_def(perf_event_header, type, T_UINT, "event type"),
{ .name = NULL, },
};
static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
{
PyObject *ret;
char *s;
if (asprintf(&s, "{ type: sample }") < 0) {
ret = PyErr_NoMemory();
} else {
ret = _PyUnicode_FromString(s);
free(s);
}
return ret;
}
#ifdef HAVE_LIBTRACEEVENT
static bool is_tracepoint(struct pyrf_event *pevent)
{
return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}
static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
struct tep_handle *pevent = field->event->tep;
void *data = pe->sample.raw_data;
PyObject *ret = NULL;
unsigned long long val;
unsigned int offset, len;
if (field->flags & TEP_FIELD_IS_ARRAY) {
offset = field->offset;
len = field->size;
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
val = tep_read_number(pevent, data + offset, len);
offset = val;
len = offset >> 16;
offset &= 0xffff;
if (tep_field_is_relative(field->flags))
offset += field->offset + field->size;
}
if (field->flags & TEP_FIELD_IS_STRING &&
is_printable_array(data + offset, len)) {
ret = _PyUnicode_FromString((char *)data + offset);
} else {
ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
field->flags &= ~TEP_FIELD_IS_STRING;
}
} else {
val = tep_read_number(pevent, data + field->offset,
field->size);
if (field->flags & TEP_FIELD_IS_POINTER)
ret = PyLong_FromUnsignedLong((unsigned long) val);
else if (field->flags & TEP_FIELD_IS_SIGNED)
ret = PyLong_FromLong((long) val);
else
ret = PyLong_FromUnsignedLong((unsigned long) val);
}
return ret;
}
static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
struct evsel *evsel = pevent->evsel;
struct tep_format_field *field;
if (!evsel->tp_format) {
struct tep_event *tp_format;
tp_format = trace_event__tp_format_id(evsel->core.attr.config);
if (IS_ERR_OR_NULL(tp_format))
return NULL;
evsel->tp_format = tp_format;
}
field = tep_find_any_field(evsel->tp_format, str);
if (!field)
return NULL;
return tracepoint_field(pevent, field);
}
#endif /* HAVE_LIBTRACEEVENT */
static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
PyObject *obj = NULL;
#ifdef HAVE_LIBTRACEEVENT
if (is_tracepoint(pevent))
obj = get_tracepoint_field(pevent, attr_name);
#endif
return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}
static PyTypeObject pyrf_sample_event__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.sample_event",
.tp_basicsize = sizeof(struct pyrf_event),
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_sample_event__doc,
.tp_members = pyrf_sample_event__members,
.tp_repr = (reprfunc)pyrf_sample_event__repr,
.tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
};
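/*
 * Python-side effect of the tp_getattro hook above (a sketch, assuming a
 * tracepoint evsel and libtraceevent support): any field named in the
 * tracepoint's format file resolves as an attribute, e.g. for
 * sched:sched_switch:
 *
 *   event = evlist.read_on_cpu(cpu)
 *   print(event.prev_comm, event.next_pid)
 */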
static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
static PyMemberDef pyrf_context_switch_event__members[] = {
sample_members
member_def(perf_event_header, type, T_UINT, "event type"),
member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
{ .name = NULL, },
};
static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
{
PyObject *ret;
char *s;
if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
pevent->event.context_switch.next_prev_pid,
pevent->event.context_switch.next_prev_tid,
!!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
ret = PyErr_NoMemory();
} else {
ret = _PyUnicode_FromString(s);
free(s);
}
return ret;
}
static PyTypeObject pyrf_context_switch_event__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.context_switch_event",
.tp_basicsize = sizeof(struct pyrf_event),
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_context_switch_event__doc,
.tp_members = pyrf_context_switch_event__members,
.tp_repr = (reprfunc)pyrf_context_switch_event__repr,
};
static int pyrf_event__setup_types(void)
{
int err;
pyrf_mmap_event__type.tp_new =
pyrf_task_event__type.tp_new =
pyrf_comm_event__type.tp_new =
pyrf_lost_event__type.tp_new =
pyrf_read_event__type.tp_new =
pyrf_sample_event__type.tp_new =
pyrf_context_switch_event__type.tp_new =
pyrf_throttle_event__type.tp_new = PyType_GenericNew;
err = PyType_Ready(&pyrf_mmap_event__type);
if (err < 0)
goto out;
err = PyType_Ready(&pyrf_lost_event__type);
if (err < 0)
goto out;
err = PyType_Ready(&pyrf_task_event__type);
if (err < 0)
goto out;
err = PyType_Ready(&pyrf_comm_event__type);
if (err < 0)
goto out;
err = PyType_Ready(&pyrf_throttle_event__type);
if (err < 0)
goto out;
err = PyType_Ready(&pyrf_read_event__type);
if (err < 0)
goto out;
err = PyType_Ready(&pyrf_sample_event__type);
if (err < 0)
goto out;
err = PyType_Ready(&pyrf_context_switch_event__type);
if (err < 0)
goto out;
out:
return err;
}
static PyTypeObject *pyrf_event__type[] = {
[PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
[PERF_RECORD_LOST] = &pyrf_lost_event__type,
[PERF_RECORD_COMM] = &pyrf_comm_event__type,
[PERF_RECORD_EXIT] = &pyrf_task_event__type,
[PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
[PERF_RECORD_FORK] = &pyrf_task_event__type,
[PERF_RECORD_READ] = &pyrf_read_event__type,
[PERF_RECORD_SAMPLE] = &pyrf_sample_event__type,
[PERF_RECORD_SWITCH] = &pyrf_context_switch_event__type,
[PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
};
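/*
 * The table above maps PERF_RECORD_* types to the Python types defined in
 * this file; pyrf_event__new() below refuses any record type outside it.
 */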
static PyObject *pyrf_event__new(union perf_event *event)
{
struct pyrf_event *pevent;
PyTypeObject *ptype;
if ((event->header.type < PERF_RECORD_MMAP ||
event->header.type > PERF_RECORD_SAMPLE) &&
!(event->header.type == PERF_RECORD_SWITCH ||
event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
return NULL;
ptype = pyrf_event__type[event->header.type];
pevent = PyObject_New(struct pyrf_event, ptype);
if (pevent != NULL)
memcpy(&pevent->event, event, event->header.size);
return (PyObject *)pevent;
}
struct pyrf_cpu_map {
PyObject_HEAD
struct perf_cpu_map *cpus;
};
static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
PyObject *args, PyObject *kwargs)
{
static char *kwlist[] = { "cpustr", NULL };
char *cpustr = NULL;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
kwlist, &cpustr))
return -1;
pcpus->cpus = perf_cpu_map__new(cpustr);
if (pcpus->cpus == NULL)
return -1;
return 0;
}
static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
perf_cpu_map__put(pcpus->cpus);
Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}
static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
struct pyrf_cpu_map *pcpus = (void *)obj;
return perf_cpu_map__nr(pcpus->cpus);
}
static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
struct pyrf_cpu_map *pcpus = (void *)obj;
	if (i >= perf_cpu_map__nr(pcpus->cpus)) {
		PyErr_SetString(PyExc_IndexError, "Index out of range");
		return NULL;
	}
return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}
static PySequenceMethods pyrf_cpu_map__sequence_methods = {
.sq_length = pyrf_cpu_map__length,
.sq_item = pyrf_cpu_map__item,
};
static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
static PyTypeObject pyrf_cpu_map__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.cpu_map",
.tp_basicsize = sizeof(struct pyrf_cpu_map),
.tp_dealloc = (destructor)pyrf_cpu_map__delete,
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_cpu_map__doc,
.tp_as_sequence = &pyrf_cpu_map__sequence_methods,
.tp_init = (initproc)pyrf_cpu_map__init,
};
static int pyrf_cpu_map__setup_types(void)
{
pyrf_cpu_map__type.tp_new = PyType_GenericNew;
return PyType_Ready(&pyrf_cpu_map__type);
}
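/*
 * With the sequence methods wired up, perf.cpu_map behaves like a Python
 * sequence. A minimal sketch (the cpustr keyword takes the usual cpu list
 * syntax, e.g. "0-3,7"; omitting it means all online CPUs):
 *
 *   cpus = perf.cpu_map("0-3")
 *   for cpu in cpus:
 *       print(cpu)          # 0, 1, 2, 3
 */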
struct pyrf_thread_map {
PyObject_HEAD
struct perf_thread_map *threads;
};
static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
PyObject *args, PyObject *kwargs)
{
static char *kwlist[] = { "pid", "tid", "uid", NULL };
int pid = -1, tid = -1, uid = UINT_MAX;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
kwlist, &pid, &tid, &uid))
return -1;
pthreads->threads = thread_map__new(pid, tid, uid);
if (pthreads->threads == NULL)
return -1;
return 0;
}
static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
perf_thread_map__put(pthreads->threads);
Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}
static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
struct pyrf_thread_map *pthreads = (void *)obj;
return perf_thread_map__nr(pthreads->threads);
}
static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
struct pyrf_thread_map *pthreads = (void *)obj;
	if (i >= perf_thread_map__nr(pthreads->threads)) {
		PyErr_SetString(PyExc_IndexError, "Index out of range");
		return NULL;
	}
return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}
static PySequenceMethods pyrf_thread_map__sequence_methods = {
.sq_length = pyrf_thread_map__length,
.sq_item = pyrf_thread_map__item,
};
static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
static PyTypeObject pyrf_thread_map__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.thread_map",
.tp_basicsize = sizeof(struct pyrf_thread_map),
.tp_dealloc = (destructor)pyrf_thread_map__delete,
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_thread_map__doc,
.tp_as_sequence = &pyrf_thread_map__sequence_methods,
.tp_init = (initproc)pyrf_thread_map__init,
};
static int pyrf_thread_map__setup_types(void)
{
pyrf_thread_map__type.tp_new = PyType_GenericNew;
return PyType_Ready(&pyrf_thread_map__type);
}
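/*
 * Likewise, perf.thread_map is a sequence of pids. A sketch (with no
 * arguments, pid and tid default to -1, i.e. the calling thread):
 *
 *   threads = perf.thread_map(-1)
 *   print(len(threads), threads[0])
 */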
struct pyrf_evsel {
PyObject_HEAD
struct evsel evsel;
};
static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
PyObject *args, PyObject *kwargs)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
};
static char *kwlist[] = {
"type",
"config",
"sample_freq",
"sample_period",
"sample_type",
"read_format",
"disabled",
"inherit",
"pinned",
"exclusive",
"exclude_user",
"exclude_kernel",
"exclude_hv",
"exclude_idle",
"mmap",
"context_switch",
"comm",
"freq",
"inherit_stat",
"enable_on_exec",
"task",
"watermark",
"precise_ip",
"mmap_data",
"sample_id_all",
"wakeup_events",
"bp_type",
"bp_addr",
"bp_len",
NULL
};
u64 sample_period = 0;
u32 disabled = 0,
inherit = 0,
pinned = 0,
exclusive = 0,
exclude_user = 0,
exclude_kernel = 0,
exclude_hv = 0,
exclude_idle = 0,
mmap = 0,
context_switch = 0,
comm = 0,
freq = 1,
inherit_stat = 0,
enable_on_exec = 0,
task = 0,
watermark = 0,
precise_ip = 0,
mmap_data = 0,
sample_id_all = 1;
int idx = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs,
"|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
&attr.type, &attr.config, &attr.sample_freq,
&sample_period, &attr.sample_type,
&attr.read_format, &disabled, &inherit,
&pinned, &exclusive, &exclude_user,
&exclude_kernel, &exclude_hv, &exclude_idle,
&mmap, &context_switch, &comm, &freq, &inherit_stat,
&enable_on_exec, &task, &watermark,
&precise_ip, &mmap_data, &sample_id_all,
&attr.wakeup_events, &attr.bp_type,
&attr.bp_addr, &attr.bp_len, &idx))
return -1;
	/* sample_period and sample_freq share a union in perf_event_attr */
if (sample_period != 0) {
if (attr.sample_freq != 0)
return -1; /* FIXME: throw right exception */
attr.sample_period = sample_period;
}
/* Bitfields */
attr.disabled = disabled;
attr.inherit = inherit;
attr.pinned = pinned;
attr.exclusive = exclusive;
attr.exclude_user = exclude_user;
attr.exclude_kernel = exclude_kernel;
attr.exclude_hv = exclude_hv;
attr.exclude_idle = exclude_idle;
attr.mmap = mmap;
attr.context_switch = context_switch;
attr.comm = comm;
attr.freq = freq;
attr.inherit_stat = inherit_stat;
attr.enable_on_exec = enable_on_exec;
attr.task = task;
attr.watermark = watermark;
attr.precise_ip = precise_ip;
attr.mmap_data = mmap_data;
attr.sample_id_all = sample_id_all;
attr.size = sizeof(attr);
evsel__init(&pevsel->evsel, &attr, idx);
return 0;
}
static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
evsel__exit(&pevsel->evsel);
Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}
static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
PyObject *args, PyObject *kwargs)
{
struct evsel *evsel = &pevsel->evsel;
struct perf_cpu_map *cpus = NULL;
struct perf_thread_map *threads = NULL;
PyObject *pcpus = NULL, *pthreads = NULL;
int group = 0, inherit = 0;
static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
&pcpus, &pthreads, &group, &inherit))
return NULL;
if (pthreads != NULL)
threads = ((struct pyrf_thread_map *)pthreads)->threads;
if (pcpus != NULL)
cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
evsel->core.attr.inherit = inherit;
/*
* This will group just the fds for this single evsel, to group
* multiple events, use evlist.open().
*/
if (evsel__open(evsel, cpus, threads) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
}
static PyMethodDef pyrf_evsel__methods[] = {
{
.ml_name = "open",
.ml_meth = (PyCFunction)pyrf_evsel__open,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
.ml_doc = PyDoc_STR("open the event selector file descriptor table.")
},
{ .ml_name = NULL, }
};
static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
static PyTypeObject pyrf_evsel__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.evsel",
.tp_basicsize = sizeof(struct pyrf_evsel),
.tp_dealloc = (destructor)pyrf_evsel__delete,
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_doc = pyrf_evsel__doc,
.tp_methods = pyrf_evsel__methods,
.tp_init = (initproc)pyrf_evsel__init,
};
static int pyrf_evsel__setup_types(void)
{
pyrf_evsel__type.tp_new = PyType_GenericNew;
return PyType_Ready(&pyrf_evsel__type);
}
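/*
 * The keywords accepted by pyrf_evsel__init() map straight onto
 * perf_event_attr fields and bitfields. A sketch along the lines of
 * tools/perf/python/twatch.py:
 *
 *   evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *                      config = perf.COUNT_SW_DUMMY,
 *                      task = 1, comm = 1, mmap = 0, freq = 0,
 *                      wakeup_events = 1, watermark = 1,
 *                      sample_id_all = 1,
 *                      sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *   evsel.open(cpus = cpus, threads = threads)
 */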
struct pyrf_evlist {
PyObject_HEAD
struct evlist evlist;
};
static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
PyObject *args, PyObject *kwargs __maybe_unused)
{
PyObject *pcpus = NULL, *pthreads = NULL;
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
return -1;
threads = ((struct pyrf_thread_map *)pthreads)->threads;
cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
evlist__init(&pevlist->evlist, cpus, threads);
return 0;
}
static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
evlist__exit(&pevlist->evlist);
Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}
static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
PyObject *args, PyObject *kwargs)
{
struct evlist *evlist = &pevlist->evlist;
static char *kwlist[] = { "pages", "overwrite", NULL };
int pages = 128, overwrite = false;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
&pages, &overwrite))
return NULL;
if (evlist__mmap(evlist, pages) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
}
static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
PyObject *args, PyObject *kwargs)
{
struct evlist *evlist = &pevlist->evlist;
static char *kwlist[] = { "timeout", NULL };
int timeout = -1, n;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
return NULL;
n = evlist__poll(evlist, timeout);
if (n < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
return Py_BuildValue("i", n);
}
static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
PyObject *args __maybe_unused,
PyObject *kwargs __maybe_unused)
{
struct evlist *evlist = &pevlist->evlist;
PyObject *list = PyList_New(0);
int i;
for (i = 0; i < evlist->core.pollfd.nr; ++i) {
PyObject *file;
#if PY_MAJOR_VERSION < 3
FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
if (fp == NULL)
goto free_list;
file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
NULL, NULL, NULL, 0);
#endif
if (file == NULL)
goto free_list;
if (PyList_Append(list, file) != 0) {
Py_DECREF(file);
goto free_list;
}
Py_DECREF(file);
}
return list;
free_list:
	Py_DECREF(list);
	return PyErr_NoMemory();
}
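/*
 * Python-side sketch: the file objects returned by get_pollfd() can feed
 * the select module directly, though evlist.poll() is usually simpler:
 *
 *   import select
 *   select.select(evlist.get_pollfd(), [], [])
 */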
static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
PyObject *args,
PyObject *kwargs __maybe_unused)
{
struct evlist *evlist = &pevlist->evlist;
PyObject *pevsel;
struct evsel *evsel;
if (!PyArg_ParseTuple(args, "O", &pevsel))
return NULL;
Py_INCREF(pevsel);
evsel = &((struct pyrf_evsel *)pevsel)->evsel;
evsel->core.idx = evlist->core.nr_entries;
evlist__add(evlist, evsel);
return Py_BuildValue("i", evlist->core.nr_entries);
}
static struct mmap *get_md(struct evlist *evlist, int cpu)
{
int i;
for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct mmap *md = &evlist->mmap[i];
if (md->core.cpu.cpu == cpu)
return md;
}
return NULL;
}
static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
PyObject *args, PyObject *kwargs)
{
struct evlist *evlist = &pevlist->evlist;
union perf_event *event;
int sample_id_all = 1, cpu;
static char *kwlist[] = { "cpu", "sample_id_all", NULL };
struct mmap *md;
int err;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
&cpu, &sample_id_all))
return NULL;
md = get_md(evlist, cpu);
if (!md)
return NULL;
if (perf_mmap__read_init(&md->core) < 0)
goto end;
event = perf_mmap__read_event(&md->core);
if (event != NULL) {
PyObject *pyevent = pyrf_event__new(event);
struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
struct evsel *evsel;
if (pyevent == NULL)
return PyErr_NoMemory();
evsel = evlist__event2evsel(evlist, event);
		if (!evsel) {
			Py_DECREF(pyevent);
			Py_INCREF(Py_None);
			return Py_None;
		}
pevent->evsel = evsel;
err = evsel__parse_sample(evsel, event, &pevent->sample);
		/* Consume the event only after we have parsed it out. */
perf_mmap__consume(&md->core);
		if (err) {
			Py_DECREF(pyevent);
			return PyErr_Format(PyExc_OSError,
					    "perf: can't parse sample, err=%d", err);
		}
return pyevent;
}
end:
Py_INCREF(Py_None);
return Py_None;
}
static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
				   PyObject *args __maybe_unused, PyObject *kwargs __maybe_unused)
{
struct evlist *evlist = &pevlist->evlist;
if (evlist__open(evlist) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
Py_INCREF(Py_None);
return Py_None;
}
static PyMethodDef pyrf_evlist__methods[] = {
{
.ml_name = "mmap",
.ml_meth = (PyCFunction)pyrf_evlist__mmap,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
.ml_doc = PyDoc_STR("mmap the file descriptor table.")
},
{
.ml_name = "open",
.ml_meth = (PyCFunction)pyrf_evlist__open,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
.ml_doc = PyDoc_STR("open the file descriptors.")
},
{
.ml_name = "poll",
.ml_meth = (PyCFunction)pyrf_evlist__poll,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
.ml_doc = PyDoc_STR("poll the file descriptor table.")
},
{
.ml_name = "get_pollfd",
.ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
.ml_doc = PyDoc_STR("get the poll file descriptor table.")
},
{
.ml_name = "add",
.ml_meth = (PyCFunction)pyrf_evlist__add,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
.ml_doc = PyDoc_STR("adds an event selector to the list.")
},
{
.ml_name = "read_on_cpu",
.ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
.ml_doc = PyDoc_STR("reads an event.")
},
{ .ml_name = NULL, }
};
static Py_ssize_t pyrf_evlist__length(PyObject *obj)
{
struct pyrf_evlist *pevlist = (void *)obj;
return pevlist->evlist.core.nr_entries;
}
static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
{
struct pyrf_evlist *pevlist = (void *)obj;
struct evsel *pos;
if (i >= pevlist->evlist.core.nr_entries)
return NULL;
evlist__for_each_entry(&pevlist->evlist, pos) {
if (i-- == 0)
break;
}
return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}
static PySequenceMethods pyrf_evlist__sequence_methods = {
.sq_length = pyrf_evlist__length,
.sq_item = pyrf_evlist__item,
};
static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
static PyTypeObject pyrf_evlist__type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "perf.evlist",
.tp_basicsize = sizeof(struct pyrf_evlist),
.tp_dealloc = (destructor)pyrf_evlist__delete,
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
.tp_as_sequence = &pyrf_evlist__sequence_methods,
.tp_doc = pyrf_evlist__doc,
.tp_methods = pyrf_evlist__methods,
.tp_init = (initproc)pyrf_evlist__init,
};
static int pyrf_evlist__setup_types(void)
{
pyrf_evlist__type.tp_new = PyType_GenericNew;
return PyType_Ready(&pyrf_evlist__type);
}
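/*
 * Putting the evlist pieces together from Python, roughly as
 * tools/perf/python/twatch.py does (a sketch):
 *
 *   evlist = perf.evlist(cpus, threads)
 *   evlist.add(evsel)
 *   evlist.mmap()
 *   while True:
 *       evlist.poll(timeout = -1)
 *       for cpu in cpus:
 *           event = evlist.read_on_cpu(cpu)
 *           if not event:
 *               continue
 *           print(event)
 */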
#define PERF_CONST(name) { #name, PERF_##name }
static struct {
const char *name;
int value;
} perf__constants[] = {
PERF_CONST(TYPE_HARDWARE),
PERF_CONST(TYPE_SOFTWARE),
PERF_CONST(TYPE_TRACEPOINT),
PERF_CONST(TYPE_HW_CACHE),
PERF_CONST(TYPE_RAW),
PERF_CONST(TYPE_BREAKPOINT),
PERF_CONST(COUNT_HW_CPU_CYCLES),
PERF_CONST(COUNT_HW_INSTRUCTIONS),
PERF_CONST(COUNT_HW_CACHE_REFERENCES),
PERF_CONST(COUNT_HW_CACHE_MISSES),
PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
PERF_CONST(COUNT_HW_BRANCH_MISSES),
PERF_CONST(COUNT_HW_BUS_CYCLES),
PERF_CONST(COUNT_HW_CACHE_L1D),
PERF_CONST(COUNT_HW_CACHE_L1I),
PERF_CONST(COUNT_HW_CACHE_LL),
PERF_CONST(COUNT_HW_CACHE_DTLB),
PERF_CONST(COUNT_HW_CACHE_ITLB),
PERF_CONST(COUNT_HW_CACHE_BPU),
PERF_CONST(COUNT_HW_CACHE_OP_READ),
PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),
PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),
PERF_CONST(COUNT_SW_CPU_CLOCK),
PERF_CONST(COUNT_SW_TASK_CLOCK),
PERF_CONST(COUNT_SW_PAGE_FAULTS),
PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
PERF_CONST(COUNT_SW_EMULATION_FAULTS),
PERF_CONST(COUNT_SW_DUMMY),
PERF_CONST(SAMPLE_IP),
PERF_CONST(SAMPLE_TID),
PERF_CONST(SAMPLE_TIME),
PERF_CONST(SAMPLE_ADDR),
PERF_CONST(SAMPLE_READ),
PERF_CONST(SAMPLE_CALLCHAIN),
PERF_CONST(SAMPLE_ID),
PERF_CONST(SAMPLE_CPU),
PERF_CONST(SAMPLE_PERIOD),
PERF_CONST(SAMPLE_STREAM_ID),
PERF_CONST(SAMPLE_RAW),
PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
PERF_CONST(FORMAT_ID),
PERF_CONST(FORMAT_GROUP),
PERF_CONST(RECORD_MMAP),
PERF_CONST(RECORD_LOST),
PERF_CONST(RECORD_COMM),
PERF_CONST(RECORD_EXIT),
PERF_CONST(RECORD_THROTTLE),
PERF_CONST(RECORD_UNTHROTTLE),
PERF_CONST(RECORD_FORK),
PERF_CONST(RECORD_READ),
PERF_CONST(RECORD_SAMPLE),
PERF_CONST(RECORD_MMAP2),
PERF_CONST(RECORD_AUX),
PERF_CONST(RECORD_ITRACE_START),
PERF_CONST(RECORD_LOST_SAMPLES),
PERF_CONST(RECORD_SWITCH),
PERF_CONST(RECORD_SWITCH_CPU_WIDE),
PERF_CONST(RECORD_MISC_SWITCH_OUT),
{ .name = NULL, },
};
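/*
 * Each PERF_CONST() entry becomes a module-level integer attribute when
 * the module dict is filled in below, so Python callers can write, e.g.:
 *
 *   attr_type = perf.TYPE_HARDWARE
 *   config = perf.COUNT_HW_CPU_CYCLES
 */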
static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
PyObject *args, PyObject *kwargs)
{
#ifndef HAVE_LIBTRACEEVENT
return NULL;
#else
struct tep_event *tp_format;
static char *kwlist[] = { "sys", "name", NULL };
char *sys = NULL;
char *name = NULL;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
&sys, &name))
return NULL;
tp_format = trace_event__tp_format(sys, name);
if (IS_ERR(tp_format))
return _PyLong_FromLong(-1);
return _PyLong_FromLong(tp_format->id);
#endif // HAVE_LIBTRACEEVENT
}
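/*
 * From Python this resolves a tracepoint to its config id, suitable for a
 * PERF_TYPE_TRACEPOINT evsel (a sketch, assuming libtraceevent support):
 *
 *   cfg = perf.tracepoint(sys = "sched", name = "sched_switch")
 *   evsel = perf.evsel(type = perf.TYPE_TRACEPOINT, config = cfg,
 *                      sample_type = perf.SAMPLE_RAW)
 */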
static PyMethodDef perf__methods[] = {
{
.ml_name = "tracepoint",
.ml_meth = (PyCFunction) pyrf__tracepoint,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
.ml_doc = PyDoc_STR("Get tracepoint config.")
},
{ .ml_name = NULL, }
};
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void)
#else
PyMODINIT_FUNC PyInit_perf(void)
#endif
{
PyObject *obj;
int i;
PyObject *dict;
#if PY_MAJOR_VERSION < 3
PyObject *module = Py_InitModule("perf", perf__methods);
#else
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"perf", /* m_name */
"", /* m_doc */
-1, /* m_size */
perf__methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
PyObject *module = PyModule_Create(&moduledef);
#endif
if (module == NULL ||
pyrf_event__setup_types() < 0 ||
pyrf_evlist__setup_types() < 0 ||
pyrf_evsel__setup_types() < 0 ||
pyrf_thread_map__setup_types() < 0 ||
pyrf_cpu_map__setup_types() < 0)
#if PY_MAJOR_VERSION < 3
return;
#else
return module;
#endif
	/* The page_size variable is defined in the util object file; initialize it here. */
page_size = sysconf(_SC_PAGE_SIZE);
Py_INCREF(&pyrf_evlist__type);
PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
Py_INCREF(&pyrf_evsel__type);
PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
Py_INCREF(&pyrf_mmap_event__type);
PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
Py_INCREF(&pyrf_lost_event__type);
PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
Py_INCREF(&pyrf_comm_event__type);
PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
Py_INCREF(&pyrf_task_event__type);
PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
Py_INCREF(&pyrf_throttle_event__type);
PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
Py_INCREF(&pyrf_read_event__type);
PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
Py_INCREF(&pyrf_sample_event__type);
PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
Py_INCREF(&pyrf_context_switch_event__type);
PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
Py_INCREF(&pyrf_thread_map__type);
PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
Py_INCREF(&pyrf_cpu_map__type);
PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
dict = PyModule_GetDict(module);
if (dict == NULL)
goto error;
for (i = 0; perf__constants[i].name != NULL; i++) {
obj = _PyLong_FromLong(perf__constants[i].value);
if (obj == NULL)
goto error;
PyDict_SetItemString(dict, perf__constants[i].name, obj);
Py_DECREF(obj);
}
error:
if (PyErr_Occurred())
PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
#if PY_MAJOR_VERSION >= 3
return module;
#endif
}
/*
* Dummy, to avoid dragging all the test_attr infrastructure in the python
* binding.
*/
void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
int fd, int group_fd, unsigned long flags)
{
}
void evlist__free_stats(struct evlist *evlist)
{
}
| linux-master | tools/perf/util/python.c |