// SPDX-License-Identifier: GPL-2.0
/*
* Capability utilities
*/
#ifdef HAVE_LIBCAP_SUPPORT
#include "cap.h"
#include <stdbool.h>
#include <sys/capability.h>
bool perf_cap__capable(cap_value_t cap)
{
cap_flag_value_t val;
cap_t caps = cap_get_proc();
if (!caps)
return false;
if (cap_get_flag(caps, cap, CAP_EFFECTIVE, &val) != 0)
val = CAP_CLEAR;
if (cap_free(caps) != 0)
return false;
return val == CAP_SET;
}
#endif /* HAVE_LIBCAP_SUPPORT */
| linux-master | tools/perf/util/cap.c |
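A minimal usage sketch for perf_cap__capable() above: gate a privileged operation on an effective capability when built with libcap. The helper name has_admin_cap() is hypothetical, and the geteuid() fallback for builds without HAVE_LIBCAP_SUPPORT is an assumption made here for illustration, not part of cap.c.

#include <stdbool.h>
#include <unistd.h>
#ifdef HAVE_LIBCAP_SUPPORT
#include <sys/capability.h>
#include "cap.h"
#endif

/* Hypothetical helper: may this process perform admin-only setup? */
static bool has_admin_cap(void)
{
#ifdef HAVE_LIBCAP_SUPPORT
	/* Check the effective capability set via libcap. */
	return perf_cap__capable(CAP_SYS_ADMIN);
#else
	/* Assumed fallback without libcap: require an effective uid of 0. */
	return geteuid() == 0;
#endif
}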
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*
* Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
* copyright notes.
*/
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>
#define MASK_SIZE 1023
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
char buf[MASK_SIZE + 1];
size_t len;
len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
buf[len] = '\0';
pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
}
size_t mmap__mmap_len(struct mmap *map)
{
return perf_mmap__mmap_len(&map->core);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
struct auxtrace_mmap_params *mp __maybe_unused,
void *userpg __maybe_unused,
int fd __maybe_unused)
{
return 0;
}
void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}
void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
off_t auxtrace_offset __maybe_unused,
unsigned int auxtrace_pages __maybe_unused,
bool auxtrace_overwrite __maybe_unused)
{
}
void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
struct evlist *evlist __maybe_unused,
struct evsel *evsel __maybe_unused,
int idx __maybe_unused)
{
}
#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
return map->aio.nr_cblocks > 0;
}
#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
if (map->aio.data[idx] == MAP_FAILED) {
map->aio.data[idx] = NULL;
return -1;
}
return 0;
}
static void perf_mmap__aio_free(struct mmap *map, int idx)
{
if (map->aio.data[idx]) {
munmap(map->aio.data[idx], mmap__mmap_len(map));
map->aio.data[idx] = NULL;
}
}
static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
{
void *data;
size_t mmap_len;
unsigned long *node_mask;
unsigned long node_index;
int err = 0;
if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
data = map->aio.data[idx];
mmap_len = mmap__mmap_len(map);
node_index = cpu__get_node(cpu);
node_mask = bitmap_zalloc(node_index + 1);
if (!node_mask) {
pr_err("Failed to allocate node mask for mbind: error %m\n");
return -1;
}
__set_bit(node_index, node_mask);
if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
data, data + mmap_len, node_index);
err = -1;
}
bitmap_free(node_mask);
}
return err;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
map->aio.data[idx] = malloc(mmap__mmap_len(map));
if (map->aio.data[idx] == NULL)
return -1;
return 0;
}
static void perf_mmap__aio_free(struct mmap *map, int idx)
{
zfree(&(map->aio.data[idx]));
}
static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)
{
return 0;
}
#endif
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
int delta_max, i, prio, ret;
map->aio.nr_cblocks = mp->nr_cblocks;
if (map->aio.nr_cblocks) {
map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
if (!map->aio.aiocb) {
pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
return -1;
}
map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
if (!map->aio.cblocks) {
pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
return -1;
}
map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
if (!map->aio.data) {
pr_debug2("failed to allocate data buffer, error %m\n");
return -1;
}
delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
for (i = 0; i < map->aio.nr_cblocks; ++i) {
ret = perf_mmap__aio_alloc(map, i);
if (ret == -1) {
pr_debug2("failed to allocate data buffer area, error %m");
return -1;
}
ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
if (ret == -1)
return -1;
/*
* A cblock.aio_fildes value different from -1 denotes a
* started aio write operation on the cblock, so an explicit
* record__aio_sync() call is required before the cblock
* may be reused again.
*/
map->aio.cblocks[i].aio_fildes = -1;
/*
* Allocate cblocks with decreasing priority deltas to get
* faster aio write system calls, because queued requests
* are kept in separate per-prio queues and adding a new
* request iterates over a shorter per-prio list. Blocks
* with indexes higher than _SC_AIO_PRIO_DELTA_MAX go with
* priority 0.
*/
prio = delta_max - i;
map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
}
}
return 0;
}
static void perf_mmap__aio_munmap(struct mmap *map)
{
int i;
for (i = 0; i < map->aio.nr_cblocks; ++i)
perf_mmap__aio_free(map, i);
if (map->aio.data)
zfree(&map->aio.data);
zfree(&map->aio.cblocks);
zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
return 0;
}
static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
struct mmap_params *mp __maybe_unused)
{
return 0;
}
static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif
void mmap__munmap(struct mmap *map)
{
bitmap_free(map->affinity_mask.bits);
#ifndef PYTHON_PERF
zstd_fini(&map->zstd_data);
#endif
perf_mmap__aio_munmap(map);
if (map->data != NULL) {
munmap(map->data, mmap__mmap_len(map));
map->data = NULL;
}
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
int idx, nr_cpus;
struct perf_cpu cpu;
const struct perf_cpu_map *cpu_map = NULL;
cpu_map = cpu_map__online();
if (!cpu_map)
return;
nr_cpus = perf_cpu_map__nr(cpu_map);
for (idx = 0; idx < nr_cpus; idx++) {
cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
if (cpu__get_node(cpu) == node)
__set_bit(cpu.cpu, mask->bits);
}
}
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
map->affinity_mask.nbits = cpu__max_cpu().cpu;
map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
if (!map->affinity_mask.bits)
return -1;
if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
else if (mp->affinity == PERF_AFFINITY_CPU)
__set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
return 0;
}
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
{
if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
errno);
return -1;
}
if (mp->affinity != PERF_AFFINITY_SYS &&
perf_mmap__setup_affinity_mask(map, mp)) {
pr_debug2("failed to alloc mmap affinity mask, error %d\n",
errno);
return -1;
}
if (verbose == 2)
mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");
map->core.flush = mp->flush;
map->comp_level = mp->comp_level;
#ifndef PYTHON_PERF
if (zstd_init(&map->zstd_data, map->comp_level)) {
pr_debug2("failed to init mmap compressor, error %d\n", errno);
return -1;
}
#endif
if (map->comp_level && !perf_mmap__aio_enabled(map)) {
map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
if (map->data == MAP_FAILED) {
pr_debug2("failed to mmap data buffer, error %d\n",
errno);
map->data = NULL;
return -1;
}
}
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->core.base, fd))
return -1;
return perf_mmap__aio_mmap(map, mp);
}
int perf_mmap__push(struct mmap *md, void *to,
int push(struct mmap *map, void *to, void *buf, size_t size))
{
u64 head = perf_mmap__read_head(&md->core);
unsigned char *data = md->core.base + page_size;
unsigned long size;
void *buf;
int rc = 0;
rc = perf_mmap__read_init(&md->core);
if (rc < 0)
return (rc == -EAGAIN) ? 1 : -1;
size = md->core.end - md->core.start;
if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
buf = &data[md->core.start & md->core.mask];
size = md->core.mask + 1 - (md->core.start & md->core.mask);
md->core.start += size;
if (push(md, to, buf, size) < 0) {
rc = -1;
goto out;
}
}
buf = &data[md->core.start & md->core.mask];
size = md->core.end - md->core.start;
md->core.start += size;
if (push(md, to, buf, size) < 0) {
rc = -1;
goto out;
}
md->core.prev = head;
perf_mmap__consume(&md->core);
out:
return rc;
}
int mmap_cpu_mask__duplicate(struct mmap_cpu_mask *original, struct mmap_cpu_mask *clone)
{
clone->nbits = original->nbits;
clone->bits = bitmap_zalloc(original->nbits);
if (!clone->bits)
return -ENOMEM;
memcpy(clone->bits, original->bits, MMAP_CPU_MASK_BYTES(original));
return 0;
}
| linux-master | tools/perf/util/mmap.c |
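perf_mmap__push() above drains the ring buffer in at most two contiguous chunks when the available data wraps around the end of the buffer, invoking the caller-supplied push() callback once per chunk. The sketch below, assuming the perf-internal mmap.h declarations shown above, passes an output file descriptor through the opaque 'to' pointer; the names write_chunk() and drain_mmap() are hypothetical.

#include <unistd.h>
#include "mmap.h"

/* Hypothetical sink: append one contiguous ring-buffer chunk to a file. */
static int write_chunk(struct mmap *map, void *to, void *buf, size_t size)
{
	int fd = *(int *)to;	/* 'to' carries caller context, here an output fd */
	char *p = buf;

	(void)map;		/* the map itself is not needed by this sink */

	while (size) {
		ssize_t n = write(fd, p, size);

		if (n <= 0)
			return -1;
		p += n;
		size -= n;
	}
	return 0;
}

/* Drain one mmap'ed ring buffer into fd; the callback may run twice on wrap-around. */
static int drain_mmap(struct mmap *map, int fd)
{
	return perf_mmap__push(map, &fd, write_chunk);
}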
/* SPDX-License-Identifier: LGPL-2.1 */
#include "util/debug.h"
#include "util/rlimit.h"
#include <sys/time.h>
#include <sys/resource.h>
/*
* Bump the memlock so that we can get bpf maps of a reasonable size,
* like the ones used with 'perf trace' and with 'perf test bpf';
* improve this to some specific request if needed.
*/
void rlimit__bump_memlock(void)
{
struct rlimit rlim;
if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
rlim.rlim_cur *= 4;
rlim.rlim_max *= 4;
if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
rlim.rlim_cur /= 2;
rlim.rlim_max /= 2;
if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0)
pr_debug("Couldn't bump rlimit(MEMLOCK), failures may take place when creating BPF maps, etc\n");
}
}
}
| linux-master | tools/perf/util/rlimit.c |
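rlimit__bump_memlock() above is best effort: it quadruples both the soft and hard RLIMIT_MEMLOCK, and if that is rejected it retries with only a doubling before giving up with a debug message. Below is a minimal sketch of calling it early at startup, assuming the perf-internal util/rlimit.h declaration; the printout is purely illustrative.

#include <stdio.h>
#include <sys/resource.h>
#include "util/rlimit.h"

int main(void)
{
	struct rlimit rlim;

	/* Best effort: raise RLIMIT_MEMLOCK before creating BPF maps. */
	rlimit__bump_memlock();

	if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0)
		printf("RLIMIT_MEMLOCK: cur=%llu max=%llu\n",
		       (unsigned long long)rlim.rlim_cur,
		       (unsigned long long)rlim.rlim_max);
	return 0;
}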
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dwarf-aux.c : libdw auxiliary interfaces
*/
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include "debug.h"
#include "dwarf-aux.h"
#include "strbuf.h"
#include "string2.h"
/**
* cu_find_realpath - Find the realpath of the target file
* @cu_die: A DIE(dwarf information entry) of CU(compilation Unit)
* @fname: The tail filename of the target file
*
* Find the real(long) path of @fname in @cu_die.
*/
const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
{
Dwarf_Files *files;
size_t nfiles, i;
const char *src = NULL;
int ret;
if (!fname)
return NULL;
ret = dwarf_getsrcfiles(cu_die, &files, &nfiles);
if (ret != 0)
return NULL;
for (i = 0; i < nfiles; i++) {
src = dwarf_filesrc(files, i, NULL, NULL);
if (strtailcmp(src, fname) == 0)
break;
}
if (i == nfiles)
return NULL;
return src;
}
/**
* cu_get_comp_dir - Get the path of compilation directory
* @cu_die: a CU DIE
*
* Get the path of compilation directory of given @cu_die.
* Since this depends on DW_AT_comp_dir, older gcc will not
* embed it. In that case, this returns NULL.
*/
const char *cu_get_comp_dir(Dwarf_Die *cu_die)
{
Dwarf_Attribute attr;
if (dwarf_attr(cu_die, DW_AT_comp_dir, &attr) == NULL)
return NULL;
return dwarf_formstring(&attr);
}
/* Unlike dwarf_getsrc_die(), cu_getsrc_die() only returns statement line */
static Dwarf_Line *cu_getsrc_die(Dwarf_Die *cu_die, Dwarf_Addr addr)
{
Dwarf_Addr laddr;
Dwarf_Lines *lines;
Dwarf_Line *line;
size_t nlines, l, u, n;
bool flag;
if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0 ||
nlines == 0)
return NULL;
/* Lines are sorted by address, use binary search */
l = 0; u = nlines - 1;
while (l < u) {
n = u - (u - l) / 2;
line = dwarf_onesrcline(lines, n);
if (!line || dwarf_lineaddr(line, &laddr) != 0)
return NULL;
if (addr < laddr)
u = n - 1;
else
l = n;
}
/* Going backward to find the lowest line */
do {
line = dwarf_onesrcline(lines, --l);
if (!line || dwarf_lineaddr(line, &laddr) != 0)
return NULL;
} while (laddr == addr);
l++;
/* Going forward to find the statement line */
do {
line = dwarf_onesrcline(lines, l++);
if (!line || dwarf_lineaddr(line, &laddr) != 0 ||
dwarf_linebeginstatement(line, &flag) != 0)
return NULL;
if (laddr > addr)
return NULL;
} while (!flag);
return line;
}
/**
* cu_find_lineinfo - Get a line number and file name for given address
* @cu_die: a CU DIE
* @addr: An address
* @fname: a pointer which returns the file name string
* @lineno: a pointer which returns the line number
*
* Find a line number and file name for @addr in @cu_die.
*/
int cu_find_lineinfo(Dwarf_Die *cu_die, Dwarf_Addr addr,
const char **fname, int *lineno)
{
Dwarf_Line *line;
Dwarf_Die die_mem;
Dwarf_Addr faddr;
if (die_find_realfunc(cu_die, addr, &die_mem)
&& die_entrypc(&die_mem, &faddr) == 0 &&
faddr == addr) {
*fname = die_get_decl_file(&die_mem);
dwarf_decl_line(&die_mem, lineno);
goto out;
}
line = cu_getsrc_die(cu_die, addr);
if (line && dwarf_lineno(line, lineno) == 0) {
*fname = dwarf_linesrc(line, NULL, NULL);
if (!*fname)
/* line number is useless without filename */
*lineno = 0;
}
out:
return (*lineno && *fname) ? *lineno : -ENOENT;
}
static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data);
/**
* cu_walk_functions_at - Walk on function DIEs at given address
* @cu_die: A CU DIE
* @addr: An address
* @callback: A callback which called with found DIEs
* @data: A user data
*
* Walk on function DIEs at given @addr in @cu_die. Passed DIEs
* should be subprogram or inlined-subroutines.
*/
int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
int (*callback)(Dwarf_Die *, void *), void *data)
{
Dwarf_Die die_mem;
Dwarf_Die *sc_die;
int ret = -ENOENT;
/* Inlined function could be recursive. Trace it until fail */
for (sc_die = die_find_realfunc(cu_die, addr, &die_mem);
sc_die != NULL;
sc_die = die_find_child(sc_die, __die_find_inline_cb, &addr,
&die_mem)) {
ret = callback(sc_die, data);
if (ret)
break;
}
return ret;
}
/**
* die_get_linkage_name - Get the linkage name of the object
* @dw_die: A DIE of the object
*
* Get the linkage name attribute of given @dw_die.
* For C++ binary, the linkage name will be the mangled symbol.
*/
const char *die_get_linkage_name(Dwarf_Die *dw_die)
{
Dwarf_Attribute attr;
if (dwarf_attr_integrate(dw_die, DW_AT_linkage_name, &attr) == NULL)
return NULL;
return dwarf_formstring(&attr);
}
/**
* die_compare_name - Compare diename and tname
* @dw_die: a DIE
* @tname: a string of target name
*
* Compare the name of @dw_die and @tname. Return false if @dw_die has no name.
*/
bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
{
const char *name;
name = dwarf_diename(dw_die);
return name ? (strcmp(tname, name) == 0) : false;
}
/**
* die_match_name - Match diename/linkage name and glob
* @dw_die: a DIE
* @glob: a string of target glob pattern
*
* Glob-match the name of @dw_die against @glob. Return false if matching fails.
* This also match linkage name.
*/
bool die_match_name(Dwarf_Die *dw_die, const char *glob)
{
const char *name;
name = dwarf_diename(dw_die);
if (name && strglobmatch(name, glob))
return true;
/* fall back to check linkage name */
name = die_get_linkage_name(dw_die);
if (name && strglobmatch(name, glob))
return true;
return false;
}
/**
* die_get_call_lineno - Get callsite line number of inline-function instance
* @in_die: a DIE of an inlined function instance
*
* Get call-site line number of @in_die. This means from where the inline
* function is called.
*/
int die_get_call_lineno(Dwarf_Die *in_die)
{
Dwarf_Attribute attr;
Dwarf_Word ret;
if (!dwarf_attr(in_die, DW_AT_call_line, &attr))
return -ENOENT;
dwarf_formudata(&attr, &ret);
return (int)ret;
}
/**
* die_get_type - Get type DIE
* @vr_die: a DIE of a variable
* @die_mem: where to store a type DIE
*
* Get a DIE of the type of given variable (@vr_die), and store
* it to @die_mem. Return NULL if it fails to get a type DIE.
*/
Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
{
Dwarf_Attribute attr;
if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) &&
dwarf_formref_die(&attr, die_mem))
return die_mem;
else
return NULL;
}
/* Get a type die, but skip qualifiers */
static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
{
int tag;
do {
vr_die = die_get_type(vr_die, die_mem);
if (!vr_die)
break;
tag = dwarf_tag(vr_die);
} while (tag == DW_TAG_const_type ||
tag == DW_TAG_restrict_type ||
tag == DW_TAG_volatile_type ||
tag == DW_TAG_shared_type);
return vr_die;
}
/**
* die_get_real_type - Get a type die, but skip qualifiers and typedef
* @vr_die: a DIE of a variable
* @die_mem: where to store a type DIE
*
* Get a DIE of the type of given variable (@vr_die), and store
* it to @die_mem. Return NULL if it fails to get a type DIE.
* If the type is a qualifier (e.g. const) or a typedef, this skips it
* and tries to find the real type (a structure or basic type, e.g. int).
*/
Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
{
do {
vr_die = __die_get_real_type(vr_die, die_mem);
} while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef);
return vr_die;
}
/* Get attribute and translate it as a udata */
static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
Dwarf_Word *result)
{
Dwarf_Attribute attr;
if (dwarf_attr_integrate(tp_die, attr_name, &attr) == NULL ||
dwarf_formudata(&attr, result) != 0)
return -ENOENT;
return 0;
}
/**
* die_is_signed_type - Check whether a type DIE is signed or not
* @tp_die: a DIE of a type
*
* Get the encoding of @tp_die and return true if the encoding
* is signed.
*/
bool die_is_signed_type(Dwarf_Die *tp_die)
{
Dwarf_Word ret;
if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret))
return false;
return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
ret == DW_ATE_signed_fixed);
}
/**
* die_is_func_def - Ensure that this DIE is a subprogram and definition
* @dw_die: a DIE
*
* Ensure that this DIE is a subprogram and NOT a declaration. This
* returns true if @dw_die is a function definition.
**/
bool die_is_func_def(Dwarf_Die *dw_die)
{
Dwarf_Attribute attr;
Dwarf_Addr addr = 0;
if (dwarf_tag(dw_die) != DW_TAG_subprogram)
return false;
if (dwarf_attr(dw_die, DW_AT_declaration, &attr))
return false;
/*
* DW_AT_declaration can be lost from function declaration
* by gcc's bug #97060.
* So we need to check this subprogram DIE has DW_AT_inline
* or an entry address.
*/
if (!dwarf_attr(dw_die, DW_AT_inline, &attr) &&
die_entrypc(dw_die, &addr) < 0)
return false;
return true;
}
/**
* die_entrypc - Returns entry PC (the lowest address) of a DIE
* @dw_die: a DIE
* @addr: where to store entry PC
*
* Since dwarf_entrypc() does not return entry PC if the DIE has only address
* range, we have to use this to retrieve the lowest address from the address
* range attribute.
*/
int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
{
Dwarf_Addr base, end;
Dwarf_Attribute attr;
if (!addr)
return -EINVAL;
if (dwarf_entrypc(dw_die, addr) == 0)
return 0;
/*
* Since the dwarf_ranges() will return 0 if there is no
* DW_AT_ranges attribute, we should check it first.
*/
if (!dwarf_attr(dw_die, DW_AT_ranges, &attr))
return -ENOENT;
return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
}
/**
* die_is_func_instance - Ensure that this DIE is an instance of a subprogram
* @dw_die: a DIE
*
* Ensure that this DIE is an instance (which has an entry address).
* This returns true if @dw_die is a function instance. If not, the @dw_die
* must be a prototype. You can use die_walk_instances() to find actual
* instances.
**/
bool die_is_func_instance(Dwarf_Die *dw_die)
{
Dwarf_Addr tmp;
Dwarf_Attribute attr_mem;
int tag = dwarf_tag(dw_die);
if (tag != DW_TAG_subprogram &&
tag != DW_TAG_inlined_subroutine)
return false;
return dwarf_entrypc(dw_die, &tmp) == 0 ||
dwarf_attr(dw_die, DW_AT_ranges, &attr_mem) != NULL;
}
/**
* die_get_data_member_location - Get the data-member offset
* @mb_die: a DIE of a member of a data structure
* @offs: The offset of the member in the data structure
*
* Get the offset of @mb_die in the data structure including @mb_die, and
* stores result offset to @offs. If any error occurs this returns errno.
*/
int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
{
Dwarf_Attribute attr;
Dwarf_Op *expr;
size_t nexpr;
int ret;
if (dwarf_attr(mb_die, DW_AT_data_member_location, &attr) == NULL)
return -ENOENT;
if (dwarf_formudata(&attr, offs) != 0) {
/* DW_AT_data_member_location should be DW_OP_plus_uconst */
ret = dwarf_getlocation(&attr, &expr, &nexpr);
if (ret < 0 || nexpr == 0)
return -ENOENT;
if (expr[0].atom != DW_OP_plus_uconst || nexpr != 1) {
pr_debug("Unable to get offset:Unexpected OP %x (%zd)\n",
expr[0].atom, nexpr);
return -ENOTSUP;
}
*offs = (Dwarf_Word)expr[0].number;
}
return 0;
}
/* Get the call file index number in CU DIE */
static int die_get_call_fileno(Dwarf_Die *in_die)
{
Dwarf_Word idx;
if (die_get_attr_udata(in_die, DW_AT_call_file, &idx) == 0)
return (int)idx;
else
return -ENOENT;
}
/* Get the declared file index number in CU DIE */
static int die_get_decl_fileno(Dwarf_Die *pdie)
{
Dwarf_Word idx;
if (die_get_attr_udata(pdie, DW_AT_decl_file, &idx) == 0)
return (int)idx;
else
return -ENOENT;
}
/* Return the file name by index */
static const char *die_get_file_name(Dwarf_Die *dw_die, int idx)
{
Dwarf_Die cu_die;
Dwarf_Files *files;
Dwarf_Attribute attr_mem;
if (idx < 0 || !dwarf_attr_integrate(dw_die, DW_AT_decl_file, &attr_mem) ||
!dwarf_cu_die(attr_mem.cu, &cu_die, NULL, NULL, NULL, NULL, NULL, NULL) ||
dwarf_getsrcfiles(&cu_die, &files, NULL) != 0)
return NULL;
return dwarf_filesrc(files, idx, NULL, NULL);
}
/**
* die_get_call_file - Get callsite file name of inlined function instance
* @in_die: a DIE of an inlined function instance
*
* Get call-site file name of @in_die. This means from which file the inline
* function is called.
*/
const char *die_get_call_file(Dwarf_Die *in_die)
{
return die_get_file_name(in_die, die_get_call_fileno(in_die));
}
/**
* die_get_decl_file - Find the declared file name of this DIE
* @dw_die: a DIE for something declared.
*
* Get declared file name of @dw_die.
* NOTE: Since some versions of clang's DWARF5 implementation incorrectly use
* file index 0 for DW_AT_decl_file, die_get_decl_file() will return NULL in
* such cases. Use this function instead of dwarf_decl_file().
*/
const char *die_get_decl_file(Dwarf_Die *dw_die)
{
return die_get_file_name(dw_die, die_get_decl_fileno(dw_die));
}
/**
* die_find_child - Generic DIE search function in DIE tree
* @rt_die: a root DIE
* @callback: a callback function
* @data: a user data passed to the callback function
* @die_mem: a buffer for result DIE
*
* Trace DIE tree from @rt_die and call @callback for each child DIE.
* If @callback returns DIE_FIND_CB_END, this stores the DIE into
* @die_mem and returns it. If @callback returns DIE_FIND_CB_CONTINUE,
* this continues to trace the tree. Optionally, @callback can return
* DIE_FIND_CB_CHILD and DIE_FIND_CB_SIBLING, those means trace only
* the children and trace only the siblings respectively.
* Returns NULL if @callback can't find any appropriate DIE.
*/
Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
int (*callback)(Dwarf_Die *, void *),
void *data, Dwarf_Die *die_mem)
{
Dwarf_Die child_die;
int ret;
ret = dwarf_child(rt_die, die_mem);
if (ret != 0)
return NULL;
do {
ret = callback(die_mem, data);
if (ret == DIE_FIND_CB_END)
return die_mem;
if ((ret & DIE_FIND_CB_CHILD) &&
die_find_child(die_mem, callback, data, &child_die)) {
memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
return die_mem;
}
} while ((ret & DIE_FIND_CB_SIBLING) &&
dwarf_siblingof(die_mem, die_mem) == 0);
return NULL;
}
struct __addr_die_search_param {
Dwarf_Addr addr;
Dwarf_Die *die_mem;
};
static int __die_search_func_tail_cb(Dwarf_Die *fn_die, void *data)
{
struct __addr_die_search_param *ad = data;
Dwarf_Addr addr = 0;
if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
!dwarf_highpc(fn_die, &addr) &&
addr == ad->addr) {
memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
return DWARF_CB_ABORT;
}
return DWARF_CB_OK;
}
/**
* die_find_tailfunc - Search for a non-inlined function with tail call at
* given address
* @cu_die: a CU DIE which includes @addr
* @addr: target address
* @die_mem: a buffer for result DIE
*
* Search for a non-inlined function DIE with tail call at @addr. Stores the
* DIE to @die_mem and returns it if found. Returns NULL if failed.
*/
Dwarf_Die *die_find_tailfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
Dwarf_Die *die_mem)
{
struct __addr_die_search_param ad;
ad.addr = addr;
ad.die_mem = die_mem;
/* dwarf_getscopes can't find subprogram. */
if (!dwarf_getfuncs(cu_die, __die_search_func_tail_cb, &ad, 0))
return NULL;
else
return die_mem;
}
/* die_find callback for non-inlined function search */
static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
{
struct __addr_die_search_param *ad = data;
/*
* Since a declaration entry doesn't have a given pc, this always returns
* a function definition entry.
*/
if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
dwarf_haspc(fn_die, ad->addr)) {
memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
return DWARF_CB_ABORT;
}
return DWARF_CB_OK;
}
/**
* die_find_realfunc - Search a non-inlined function at given address
* @cu_die: a CU DIE which includes @addr
* @addr: target address
* @die_mem: a buffer for result DIE
*
* Search a non-inlined function DIE which includes @addr. Stores the
* DIE to @die_mem and returns it if found. Returns NULL if failed.
*/
Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
Dwarf_Die *die_mem)
{
struct __addr_die_search_param ad;
ad.addr = addr;
ad.die_mem = die_mem;
/* dwarf_getscopes can't find subprogram. */
if (!dwarf_getfuncs(cu_die, __die_search_func_cb, &ad, 0))
return NULL;
else
return die_mem;
}
/* die_find callback for inline function search */
static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
{
Dwarf_Addr *addr = data;
if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
dwarf_haspc(die_mem, *addr))
return DIE_FIND_CB_END;
return DIE_FIND_CB_CONTINUE;
}
/**
* die_find_top_inlinefunc - Search the top inlined function at given address
* @sp_die: a subprogram DIE which includes @addr
* @addr: target address
* @die_mem: a buffer for result DIE
*
* Search an inlined function DIE which includes @addr. Stores the
* DIE to @die_mem and returns it if found. Returns NULL if failed.
* Even if several inlined functions are expanded recursively, this
* doesn't trace it down, and returns the topmost one.
*/
Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
Dwarf_Die *die_mem)
{
return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
}
/**
* die_find_inlinefunc - Search an inlined function at given address
* @sp_die: a subprogram DIE which includes @addr
* @addr: target address
* @die_mem: a buffer for result DIE
*
* Search an inlined function DIE which includes @addr. Stores the
* DIE to @die_mem and returns it if found. Returns NULL if failed.
* If several inlined functions are expanded recursively, this trace
* it down and returns deepest one.
*/
Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
Dwarf_Die *die_mem)
{
Dwarf_Die tmp_die;
sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr, &tmp_die);
if (!sp_die)
return NULL;
/* Inlined function could be recursive. Trace it until fail */
while (sp_die) {
memcpy(die_mem, sp_die, sizeof(Dwarf_Die));
sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr,
&tmp_die);
}
return die_mem;
}
struct __instance_walk_param {
void *addr;
int (*callback)(Dwarf_Die *, void *);
void *data;
int retval;
};
static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
{
struct __instance_walk_param *iwp = data;
Dwarf_Attribute attr_mem;
Dwarf_Die origin_mem;
Dwarf_Attribute *attr;
Dwarf_Die *origin;
int tmp;
if (!die_is_func_instance(inst))
return DIE_FIND_CB_CONTINUE;
attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
if (attr == NULL)
return DIE_FIND_CB_CONTINUE;
origin = dwarf_formref_die(attr, &origin_mem);
if (origin == NULL || origin->addr != iwp->addr)
return DIE_FIND_CB_CONTINUE;
/* Ignore redundant instances */
if (dwarf_tag(inst) == DW_TAG_inlined_subroutine) {
dwarf_decl_line(origin, &tmp);
if (die_get_call_lineno(inst) == tmp) {
tmp = die_get_decl_fileno(origin);
if (die_get_call_fileno(inst) == tmp)
return DIE_FIND_CB_CONTINUE;
}
}
iwp->retval = iwp->callback(inst, iwp->data);
return (iwp->retval) ? DIE_FIND_CB_END : DIE_FIND_CB_CONTINUE;
}
/**
* die_walk_instances - Walk on instances of given DIE
* @or_die: an abstract original DIE
* @callback: a callback function which is called with instance DIE
* @data: user data
*
* Walk on the instances of the given @or_die. @or_die must be an inlined
* function declaration. This returns the return value of @callback if it
* returns a non-zero value, or -ENOENT if there is no instance.
*/
int die_walk_instances(Dwarf_Die *or_die, int (*callback)(Dwarf_Die *, void *),
void *data)
{
Dwarf_Die cu_die;
Dwarf_Die die_mem;
struct __instance_walk_param iwp = {
.addr = or_die->addr,
.callback = callback,
.data = data,
.retval = -ENOENT,
};
if (dwarf_diecu(or_die, &cu_die, NULL, NULL) == NULL)
return -ENOENT;
die_find_child(&cu_die, __die_walk_instances_cb, &iwp, &die_mem);
return iwp.retval;
}
/* Line walker internal parameters */
struct __line_walk_param {
bool recursive;
line_walk_callback_t callback;
void *data;
int retval;
};
static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
{
struct __line_walk_param *lw = data;
Dwarf_Addr addr = 0;
const char *fname;
int lineno;
if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
fname = die_get_call_file(in_die);
lineno = die_get_call_lineno(in_die);
if (fname && lineno > 0 && die_entrypc(in_die, &addr) == 0) {
lw->retval = lw->callback(fname, lineno, addr, lw->data);
if (lw->retval != 0)
return DIE_FIND_CB_END;
}
if (!lw->recursive)
return DIE_FIND_CB_SIBLING;
}
if (addr) {
fname = die_get_decl_file(in_die);
if (fname && dwarf_decl_line(in_die, &lineno) == 0) {
lw->retval = lw->callback(fname, lineno, addr, lw->data);
if (lw->retval != 0)
return DIE_FIND_CB_END;
}
}
/* Continue to search nested inlined function call-sites */
return DIE_FIND_CB_CONTINUE;
}
/* Walk on lines of blocks included in given DIE */
static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
line_walk_callback_t callback, void *data)
{
struct __line_walk_param lw = {
.recursive = recursive,
.callback = callback,
.data = data,
.retval = 0,
};
Dwarf_Die die_mem;
Dwarf_Addr addr;
const char *fname;
int lineno;
/* Handle function declaration line */
fname = die_get_decl_file(sp_die);
if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
die_entrypc(sp_die, &addr) == 0) {
lw.retval = callback(fname, lineno, addr, data);
if (lw.retval != 0)
goto done;
}
die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem);
done:
return lw.retval;
}
static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
{
struct __line_walk_param *lw = data;
/*
* Since inlined function can include another inlined function in
* the same file, we need to walk in it recursively.
*/
lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
if (lw->retval != 0)
return DWARF_CB_ABORT;
return DWARF_CB_OK;
}
/**
* die_walk_lines - Walk on lines inside given DIE
* @rt_die: a root DIE (CU, subprogram or inlined_subroutine)
* @callback: callback routine
* @data: user data
*
* Walk on all lines inside given @rt_die and call @callback on each line.
* If the @rt_die is a function, walk only on the lines inside the function,
* otherwise @rt_die must be a CU DIE.
* Note that this walks not only dwarf line list, but also function entries
* and inline call-site.
*/
int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
{
Dwarf_Lines *lines;
Dwarf_Line *line;
Dwarf_Addr addr;
const char *fname, *decf = NULL, *inf = NULL;
int lineno, ret = 0;
int decl = 0, inl;
Dwarf_Die die_mem, *cu_die;
size_t nlines, i;
bool flag;
/* Get the CU die */
if (dwarf_tag(rt_die) != DW_TAG_compile_unit) {
cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL);
dwarf_decl_line(rt_die, &decl);
decf = die_get_decl_file(rt_die);
if (!decf) {
pr_debug2("Failed to get the declared file name of %s\n",
dwarf_diename(rt_die));
return -EINVAL;
}
} else
cu_die = rt_die;
if (!cu_die) {
pr_debug2("Failed to get CU from given DIE.\n");
return -EINVAL;
}
/* Get lines list in the CU */
if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) {
pr_debug2("Failed to get source lines on this CU.\n");
return -ENOENT;
}
pr_debug2("Get %zd lines from this CU\n", nlines);
/* Walk on the lines on lines list */
for (i = 0; i < nlines; i++) {
line = dwarf_onesrcline(lines, i);
if (line == NULL ||
dwarf_lineno(line, &lineno) != 0 ||
dwarf_lineaddr(line, &addr) != 0) {
pr_debug2("Failed to get line info. "
"Possible error in debuginfo.\n");
continue;
}
/* Skip end-of-sequence */
if (dwarf_lineendsequence(line, &flag) != 0 || flag)
continue;
/* Skip Non statement line-info */
if (dwarf_linebeginstatement(line, &flag) != 0 || !flag)
continue;
/* Filter lines based on address */
if (rt_die != cu_die) {
/*
* Address filtering
* The line is included in given function, and
* no inline block includes it.
*/
if (!dwarf_haspc(rt_die, addr))
continue;
if (die_find_inlinefunc(rt_die, addr, &die_mem)) {
/* Call-site check */
inf = die_get_call_file(&die_mem);
if ((inf && !strcmp(inf, decf)) &&
die_get_call_lineno(&die_mem) == lineno)
goto found;
dwarf_decl_line(&die_mem, &inl);
if (inl != decl ||
decf != die_get_decl_file(&die_mem))
continue;
}
}
found:
/* Get source line */
fname = dwarf_linesrc(line, NULL, NULL);
ret = callback(fname, lineno, addr, data);
if (ret != 0)
return ret;
}
/*
* DWARF lines don't include function declarations and inlined
* subroutines. We have to check the functions list or the given function.
*/
if (rt_die != cu_die)
/*
* We don't need to walk inlined functions recursively, because
* inner inlined functions don't have the lines of the
* specified function.
*/
ret = __die_walk_funclines(rt_die, false, callback, data);
else {
struct __line_walk_param param = {
.callback = callback,
.data = data,
.retval = 0,
};
dwarf_getfuncs(cu_die, __die_walk_culines_cb, &param, 0);
ret = param.retval;
}
return ret;
}
struct __find_variable_param {
const char *name;
Dwarf_Addr addr;
};
static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
{
struct __find_variable_param *fvp = data;
Dwarf_Attribute attr;
int tag;
tag = dwarf_tag(die_mem);
if ((tag == DW_TAG_formal_parameter ||
tag == DW_TAG_variable) &&
die_compare_name(die_mem, fvp->name) &&
/*
* Does the DIE have location information or const value
* or external instance?
*/
(dwarf_attr(die_mem, DW_AT_external, &attr) ||
dwarf_attr(die_mem, DW_AT_location, &attr) ||
dwarf_attr(die_mem, DW_AT_const_value, &attr)))
return DIE_FIND_CB_END;
if (dwarf_haspc(die_mem, fvp->addr))
return DIE_FIND_CB_CONTINUE;
else
return DIE_FIND_CB_SIBLING;
}
/**
* die_find_variable_at - Find a given name variable at given address
* @sp_die: a function DIE
* @name: variable name
* @addr: address
* @die_mem: a buffer for result DIE
*
* Find a variable DIE called @name at @addr in @sp_die.
*/
Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
Dwarf_Addr addr, Dwarf_Die *die_mem)
{
struct __find_variable_param fvp = { .name = name, .addr = addr};
return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp,
die_mem);
}
static int __die_find_member_cb(Dwarf_Die *die_mem, void *data)
{
const char *name = data;
if (dwarf_tag(die_mem) == DW_TAG_member) {
if (die_compare_name(die_mem, name))
return DIE_FIND_CB_END;
else if (!dwarf_diename(die_mem)) { /* Unnamed structure */
Dwarf_Die type_die, tmp_die;
if (die_get_type(die_mem, &type_die) &&
die_find_member(&type_die, name, &tmp_die))
return DIE_FIND_CB_END;
}
}
return DIE_FIND_CB_SIBLING;
}
/**
* die_find_member - Find a given name member in a data structure
* @st_die: a data structure type DIE
* @name: member name
* @die_mem: a buffer for result DIE
*
* Find a member DIE called @name in @st_die.
*/
Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
Dwarf_Die *die_mem)
{
return die_find_child(st_die, __die_find_member_cb, (void *)name,
die_mem);
}
/**
* die_get_typename - Get the name of given variable DIE
* @vr_die: a variable DIE
* @buf: a strbuf for result type name
*
* Get the name of @vr_die and store it in @buf. Return 0 if it succeeded,
* or -ENOENT if it failed to find the type name.
* Note that the result stores the typedef name if possible, and stores
* "*(function_type)" if the type is a function pointer.
*/
int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf)
{
Dwarf_Die type;
int tag, ret;
const char *tmp = "";
if (__die_get_real_type(vr_die, &type) == NULL)
return -ENOENT;
tag = dwarf_tag(&type);
if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
tmp = "*";
else if (tag == DW_TAG_subroutine_type) {
/* Function pointer */
return strbuf_add(buf, "(function_type)", 15);
} else {
const char *name = dwarf_diename(&type);
if (tag == DW_TAG_union_type)
tmp = "union ";
else if (tag == DW_TAG_structure_type)
tmp = "struct ";
else if (tag == DW_TAG_enumeration_type)
tmp = "enum ";
else if (name == NULL)
return -ENOENT;
/* Write a base name */
return strbuf_addf(buf, "%s%s", tmp, name ?: "");
}
ret = die_get_typename(&type, buf);
return ret ? ret : strbuf_addstr(buf, tmp);
}
/**
* die_get_varname - Get the name and type of given variable DIE
* @vr_die: a variable DIE
* @buf: a strbuf for type and variable name
*
* Get the name and type of @vr_die and store them in @buf as "type\tname".
*/
int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
{
int ret;
ret = die_get_typename(vr_die, buf);
if (ret < 0) {
pr_debug("Failed to get type, make it unknown.\n");
ret = strbuf_add(buf, "(unknown_type)", 14);
}
return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
}
#ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
/**
* die_get_var_innermost_scope - Get innermost scope range of given variable DIE
* @sp_die: a subprogram DIE
* @vr_die: a variable DIE
* @buf: a strbuf for variable byte offset range
*
* Get the innermost scope range of @vr_die and stores it in @buf as
* "@<function_name+[NN-NN,NN-NN]>".
*/
static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
struct strbuf *buf)
{
Dwarf_Die *scopes;
int count;
size_t offset = 0;
Dwarf_Addr base;
Dwarf_Addr start, end;
Dwarf_Addr entry;
int ret;
bool first = true;
const char *name;
ret = die_entrypc(sp_die, &entry);
if (ret)
return ret;
name = dwarf_diename(sp_die);
if (!name)
return -ENOENT;
count = dwarf_getscopes_die(vr_die, &scopes);
/* (*SCOPES)[1] is the DIE for the scope containing that scope */
if (count <= 1) {
ret = -EINVAL;
goto out;
}
while ((offset = dwarf_ranges(&scopes[1], offset, &base,
&start, &end)) > 0) {
start -= entry;
end -= entry;
if (first) {
ret = strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
name, start, end);
first = false;
} else {
ret = strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
start, end);
}
if (ret < 0)
goto out;
}
if (!first)
ret = strbuf_add(buf, "]>", 2);
out:
free(scopes);
return ret;
}
/**
* die_get_var_range - Get byte offset range of given variable DIE
* @sp_die: a subprogram DIE
* @vr_die: a variable DIE
* @buf: a strbuf for type and variable name and byte offset range
*
* Get the byte offset range of @vr_die and stores it in @buf as
* "@<function_name+[NN-NN,NN-NN]>".
*/
int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
{
int ret = 0;
Dwarf_Addr base;
Dwarf_Addr start, end;
Dwarf_Addr entry;
Dwarf_Op *op;
size_t nops;
size_t offset = 0;
Dwarf_Attribute attr;
bool first = true;
const char *name;
ret = die_entrypc(sp_die, &entry);
if (ret)
return ret;
name = dwarf_diename(sp_die);
if (!name)
return -ENOENT;
if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
return -EINVAL;
while ((offset = dwarf_getlocations(&attr, offset, &base,
&start, &end, &op, &nops)) > 0) {
if (start == 0) {
/* Single Location Descriptions */
ret = die_get_var_innermost_scope(sp_die, vr_die, buf);
goto out;
}
/* Location Lists */
start -= entry;
end -= entry;
if (first) {
ret = strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
name, start, end);
first = false;
} else {
ret = strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
start, end);
}
if (ret < 0)
goto out;
}
if (!first)
ret = strbuf_add(buf, "]>", 2);
out:
return ret;
}
#else
int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
Dwarf_Die *vr_die __maybe_unused,
struct strbuf *buf __maybe_unused)
{
return -ENOTSUP;
}
#endif
/*
* die_has_loclist - Check if DW_AT_location of @vr_die is a location list
* @vr_die: a variable DIE
*/
static bool die_has_loclist(Dwarf_Die *vr_die)
{
Dwarf_Attribute loc;
int tag = dwarf_tag(vr_die);
if (tag != DW_TAG_formal_parameter &&
tag != DW_TAG_variable)
return false;
return (dwarf_attr_integrate(vr_die, DW_AT_location, &loc) &&
dwarf_whatform(&loc) == DW_FORM_sec_offset);
}
/*
* die_is_optimized_target - Check if target program is compiled with
* optimization
* @cu_die: a CU DIE
*
* For any object in given CU whose DW_AT_location is a location list,
* target program is compiled with optimization. This is applicable to
* clang as well.
*/
bool die_is_optimized_target(Dwarf_Die *cu_die)
{
Dwarf_Die tmp_die;
if (die_has_loclist(cu_die))
return true;
if (!dwarf_child(cu_die, &tmp_die) &&
die_is_optimized_target(&tmp_die))
return true;
if (!dwarf_siblingof(cu_die, &tmp_die) &&
die_is_optimized_target(&tmp_die))
return true;
return false;
}
/*
* die_search_idx - Search index of given line address
* @lines: Line records of single CU
* @nr_lines: Number of @lines
* @addr: address we are looking for
* @idx: index to be set by this function (return value)
*
* Search for @addr by looping over every line of the CU. If the address
* matches, set the index of that line in @idx. Note that a single source
* line can have multiple line records, i.e. a single source line can
* have multiple indexes.
*/
static bool die_search_idx(Dwarf_Lines *lines, unsigned long nr_lines,
Dwarf_Addr addr, unsigned long *idx)
{
unsigned long i;
Dwarf_Addr tmp;
for (i = 0; i < nr_lines; i++) {
if (dwarf_lineaddr(dwarf_onesrcline(lines, i), &tmp))
return false;
if (tmp == addr) {
*idx = i;
return true;
}
}
return false;
}
/*
* die_get_postprologue_addr - Search next address after function prologue
* @entrypc_idx: entrypc index
* @lines: Line records of single CU
* @nr_lines: Number of @lines
* @highpc: high PC address of function
* @postprologue_addr: Next address after function prologue (return value)
*
* Look for prologue-end marker. If there is no explicit marker, return
* address of next line record or next source line.
*/
static bool die_get_postprologue_addr(unsigned long entrypc_idx,
Dwarf_Lines *lines,
unsigned long nr_lines,
Dwarf_Addr highpc,
Dwarf_Addr *postprologue_addr)
{
unsigned long i;
int entrypc_lno, lno;
Dwarf_Line *line;
Dwarf_Addr addr;
bool p_end;
/* entrypc_lno is actual source line number */
line = dwarf_onesrcline(lines, entrypc_idx);
if (dwarf_lineno(line, &entrypc_lno))
return false;
for (i = entrypc_idx; i < nr_lines; i++) {
line = dwarf_onesrcline(lines, i);
if (dwarf_lineaddr(line, &addr) ||
dwarf_lineno(line, &lno) ||
dwarf_lineprologueend(line, &p_end))
return false;
/* highpc is exclusive. [entrypc,highpc) */
if (addr >= highpc)
break;
/* clang supports prologue-end marker */
if (p_end)
break;
/* Actual next line in source */
if (lno != entrypc_lno)
break;
/*
* Single source line can have multiple line records.
* For Example,
* void foo() { printf("hello\n"); }
* contains two line records. One points to declaration and
* other points to printf() line. Variable 'lno' won't get
* incremented in this case but 'i' will.
*/
if (i != entrypc_idx)
break;
}
dwarf_lineaddr(line, postprologue_addr);
if (*postprologue_addr >= highpc)
dwarf_lineaddr(dwarf_onesrcline(lines, i - 1),
postprologue_addr);
return true;
}
/*
* die_skip_prologue - Use next address after prologue as probe location
* @sp_die: a subprogram DIE
* @cu_die: a CU DIE
* @entrypc: entrypc of the function
*
* Function prologue prepares stack and registers before executing function
* logic. When the target program is compiled without optimization, function
* parameter information is only valid after the prologue. If we probe the
* entrypc of the function and try to record a function parameter, it
* contains a garbage value.
*/
void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die,
Dwarf_Addr *entrypc)
{
size_t nr_lines = 0;
unsigned long entrypc_idx = 0;
Dwarf_Lines *lines = NULL;
Dwarf_Addr postprologue_addr;
Dwarf_Addr highpc;
if (dwarf_highpc(sp_die, &highpc))
return;
if (dwarf_getsrclines(cu_die, &lines, &nr_lines))
return;
if (!die_search_idx(lines, nr_lines, *entrypc, &entrypc_idx))
return;
if (!die_get_postprologue_addr(entrypc_idx, lines, nr_lines,
highpc, &postprologue_addr))
return;
*entrypc = postprologue_addr;
}
| linux-master | tools/perf/util/dwarf-aux.c |
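die_walk_lines() above invokes a line_walk_callback_t for every line record inside the given DIE, plus function entry lines and inline call sites. Below is a minimal sketch of a callback that prints each file:line/address triple, assuming the callback signature visible at the call sites above matches the line_walk_callback_t typedef in dwarf-aux.h; dump_cu_lines() is a hypothetical wrapper.

#include <inttypes.h>
#include <stdio.h>
#include "dwarf-aux.h"

/* Print every line record reported by die_walk_lines(); returning non-zero stops the walk. */
static int print_line_cb(const char *fname, int lineno, Dwarf_Addr addr, void *data)
{
	(void)data;	/* no per-walk state needed here */
	printf("%s:%d\t%#" PRIx64 "\n", fname ?: "<unknown>", lineno, (uint64_t)addr);
	return 0;
}

/* Hypothetical wrapper: walk all lines of a CU DIE (or a single function DIE). */
static int dump_cu_lines(Dwarf_Die *cu_die)
{
	return die_walk_lines(cu_die, print_line_cb, NULL);
}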
// SPDX-License-Identifier: GPL-2.0
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "symsrc.h"
#include "demangle-cxx.h"
#include "demangle-ocaml.h"
#include "demangle-java.h"
#include "demangle-rust.h"
#include "machine.h"
#include "vdso.h"
#include "debug.h"
#include "util/copyfile.h"
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <symbol/kallsyms.h>
#include <internal/lib.h>
#ifdef HAVE_LIBBFD_SUPPORT
#define PACKAGE 'perf'
#include <bfd.h>
#endif
#if defined(HAVE_LIBBFD_SUPPORT) || defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
#ifndef DMGL_PARAMS
#define DMGL_PARAMS (1 << 0) /* Include function args */
#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
#endif
#endif
#ifndef EM_AARCH64
#define EM_AARCH64 183 /* ARM 64 bit */
#endif
#ifndef EM_LOONGARCH
#define EM_LOONGARCH 258
#endif
#ifndef ELF32_ST_VISIBILITY
#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
#endif
/* For ELF64 the definitions are the same. */
#ifndef ELF64_ST_VISIBILITY
#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
#endif
/* How to extract information held in the st_other field. */
#ifndef GELF_ST_VISIBILITY
#define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
#endif
typedef Elf64_Nhdr GElf_Nhdr;
#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
GElf_Ehdr gehdr;
GElf_Ehdr *ehdr;
ehdr = gelf_getehdr(elf, &gehdr);
if (!ehdr)
return -1;
*dst = ehdr->e_phnum;
return 0;
}
#endif
#ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
{
pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
return -1;
}
#endif
#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif
/**
* elf_symtab__for_each_symbol - iterate through all the symbols
*
* @syms: struct elf_symtab instance to iterate
* @nr_syms: number of symbols in @syms
* @idx: uint32_t index
* @sym: GElf_Sym iterator
*/
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
for (idx = 0, gelf_getsym(syms, idx, &sym);\
idx < nr_syms; \
idx++, gelf_getsym(syms, idx, &sym))
static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
return GELF_ST_TYPE(sym->st_info);
}
static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
{
return GELF_ST_VISIBILITY(sym->st_other);
}
#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif
static inline int elf_sym__is_function(const GElf_Sym *sym)
{
return (elf_sym__type(sym) == STT_FUNC ||
elf_sym__type(sym) == STT_GNU_IFUNC) &&
sym->st_name != 0 &&
sym->st_shndx != SHN_UNDEF;
}
static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
return elf_sym__type(sym) == STT_OBJECT &&
sym->st_name != 0 &&
sym->st_shndx != SHN_UNDEF;
}
static inline int elf_sym__is_label(const GElf_Sym *sym)
{
return elf_sym__type(sym) == STT_NOTYPE &&
sym->st_name != 0 &&
sym->st_shndx != SHN_UNDEF &&
sym->st_shndx != SHN_ABS &&
elf_sym__visibility(sym) != STV_HIDDEN &&
elf_sym__visibility(sym) != STV_INTERNAL;
}
static bool elf_sym__filter(GElf_Sym *sym)
{
return elf_sym__is_function(sym) || elf_sym__is_object(sym);
}
static inline const char *elf_sym__name(const GElf_Sym *sym,
const Elf_Data *symstrs)
{
return symstrs->d_buf + sym->st_name;
}
static inline const char *elf_sec__name(const GElf_Shdr *shdr,
const Elf_Data *secstrs)
{
return secstrs->d_buf + shdr->sh_name;
}
static inline int elf_sec__is_text(const GElf_Shdr *shdr,
const Elf_Data *secstrs)
{
return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}
static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
const Elf_Data *secstrs)
{
return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}
static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs)
{
return elf_sec__is_text(shdr, secstrs) ||
elf_sec__is_data(shdr, secstrs);
}
static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
Elf_Scn *sec = NULL;
GElf_Shdr shdr;
size_t cnt = 1;
while ((sec = elf_nextscn(elf, sec)) != NULL) {
gelf_getshdr(sec, &shdr);
if ((addr >= shdr.sh_addr) &&
(addr < (shdr.sh_addr + shdr.sh_size)))
return cnt;
++cnt;
}
return -1;
}
Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
GElf_Shdr *shp, const char *name, size_t *idx)
{
Elf_Scn *sec = NULL;
size_t cnt = 1;
/* ELF is corrupted/truncated, avoid calling elf_strptr. */
if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
return NULL;
while ((sec = elf_nextscn(elf, sec)) != NULL) {
char *str;
gelf_getshdr(sec, shp);
str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
if (str && !strcmp(name, str)) {
if (idx)
*idx = cnt;
return sec;
}
++cnt;
}
return NULL;
}
bool filename__has_section(const char *filename, const char *sec)
{
int fd;
Elf *elf;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
bool found = false;
fd = open(filename, O_RDONLY);
if (fd < 0)
return false;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL)
goto out;
if (gelf_getehdr(elf, &ehdr) == NULL)
goto elf_out;
found = !!elf_section_by_name(elf, &ehdr, &shdr, sec, NULL);
elf_out:
elf_end(elf);
out:
close(fd);
return found;
}
static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
{
size_t i, phdrnum;
u64 sz;
if (elf_getphdrnum(elf, &phdrnum))
return -1;
for (i = 0; i < phdrnum; i++) {
if (gelf_getphdr(elf, i, phdr) == NULL)
return -1;
if (phdr->p_type != PT_LOAD)
continue;
sz = max(phdr->p_memsz, phdr->p_filesz);
if (!sz)
continue;
if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
return 0;
}
/* Not found any valid program header */
return -1;
}
static bool want_demangle(bool is_kernel_sym)
{
return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}
/*
* Demangle C++ function signature, typically replaced by demangle-cxx.cpp
* version.
*/
__weak char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused,
bool modifiers __maybe_unused)
{
#ifdef HAVE_LIBBFD_SUPPORT
int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
return bfd_demangle(NULL, str, flags);
#elif defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
return cplus_demangle(str, flags);
#else
return NULL;
#endif
}
static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
{
char *demangled = NULL;
/*
* We need to figure out if the object was created from C++ sources
* DWARF DW_compile_unit has this, but we don't always have access
* to it...
*/
if (!want_demangle(dso->kernel || kmodule))
return demangled;
demangled = cxx_demangle_sym(elf_name, verbose > 0, verbose > 0);
if (demangled == NULL) {
demangled = ocaml_demangle_sym(elf_name);
if (demangled == NULL) {
demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
}
}
else if (rust_is_mangled(demangled))
/*
* Input to Rust demangling is the BFD-demangled
* name which it Rust-demangles in place.
*/
rust_demangle_sym(demangled);
return demangled;
}
struct rel_info {
u32 nr_entries;
u32 *sorted;
bool is_rela;
Elf_Data *reldata;
GElf_Rela rela;
GElf_Rel rel;
};
static u32 get_rel_symidx(struct rel_info *ri, u32 idx)
{
idx = ri->sorted ? ri->sorted[idx] : idx;
if (ri->is_rela) {
gelf_getrela(ri->reldata, idx, &ri->rela);
return GELF_R_SYM(ri->rela.r_info);
}
gelf_getrel(ri->reldata, idx, &ri->rel);
return GELF_R_SYM(ri->rel.r_info);
}
static u64 get_rel_offset(struct rel_info *ri, u32 x)
{
if (ri->is_rela) {
GElf_Rela rela;
gelf_getrela(ri->reldata, x, &rela);
return rela.r_offset;
} else {
GElf_Rel rel;
gelf_getrel(ri->reldata, x, &rel);
return rel.r_offset;
}
}
static int rel_cmp(const void *a, const void *b, void *r)
{
struct rel_info *ri = r;
u64 a_offset = get_rel_offset(ri, *(const u32 *)a);
u64 b_offset = get_rel_offset(ri, *(const u32 *)b);
return a_offset < b_offset ? -1 : (a_offset > b_offset ? 1 : 0);
}
static int sort_rel(struct rel_info *ri)
{
size_t sz = sizeof(ri->sorted[0]);
u32 i;
ri->sorted = calloc(ri->nr_entries, sz);
if (!ri->sorted)
return -1;
for (i = 0; i < ri->nr_entries; i++)
ri->sorted[i] = i;
qsort_r(ri->sorted, ri->nr_entries, sz, rel_cmp, ri);
return 0;
}
/*
* For x86_64, the GNU linker is putting IFUNC information in the relocation
* addend.
*/
static bool addend_may_be_ifunc(GElf_Ehdr *ehdr, struct rel_info *ri)
{
return ehdr->e_machine == EM_X86_64 && ri->is_rela &&
GELF_R_TYPE(ri->rela.r_info) == R_X86_64_IRELATIVE;
}
static bool get_ifunc_name(Elf *elf, struct dso *dso, GElf_Ehdr *ehdr,
struct rel_info *ri, char *buf, size_t buf_sz)
{
u64 addr = ri->rela.r_addend;
struct symbol *sym;
GElf_Phdr phdr;
if (!addend_may_be_ifunc(ehdr, ri))
return false;
if (elf_read_program_header(elf, addr, &phdr))
return false;
addr -= phdr.p_vaddr - phdr.p_offset;
sym = dso__find_symbol_nocache(dso, addr);
/* Expecting the address to be an IFUNC or IFUNC alias */
if (!sym || sym->start != addr || (sym->type != STT_GNU_IFUNC && !sym->ifunc_alias))
return false;
snprintf(buf, buf_sz, "%s@plt", sym->name);
return true;
}
static void exit_rel(struct rel_info *ri)
{
zfree(&ri->sorted);
}
static bool get_plt_sizes(struct dso *dso, GElf_Ehdr *ehdr, GElf_Shdr *shdr_plt,
u64 *plt_header_size, u64 *plt_entry_size)
{
switch (ehdr->e_machine) {
case EM_ARM:
*plt_header_size = 20;
*plt_entry_size = 12;
return true;
case EM_AARCH64:
*plt_header_size = 32;
*plt_entry_size = 16;
return true;
case EM_LOONGARCH:
*plt_header_size = 32;
*plt_entry_size = 16;
return true;
case EM_SPARC:
*plt_header_size = 48;
*plt_entry_size = 12;
return true;
case EM_SPARCV9:
*plt_header_size = 128;
*plt_entry_size = 32;
return true;
case EM_386:
case EM_X86_64:
*plt_entry_size = shdr_plt->sh_entsize;
/* Size is 8 or 16, if not, assume alignment indicates size */
if (*plt_entry_size != 8 && *plt_entry_size != 16)
*plt_entry_size = shdr_plt->sh_addralign == 8 ? 8 : 16;
*plt_header_size = *plt_entry_size;
break;
default: /* FIXME: s390/alpha/mips/parisc/powerpc/sh/xtensa need to be checked */
*plt_header_size = shdr_plt->sh_entsize;
*plt_entry_size = shdr_plt->sh_entsize;
break;
}
if (*plt_entry_size)
return true;
pr_debug("Missing PLT entry size for %s\n", dso->long_name);
return false;
}
static bool machine_is_x86(GElf_Half e_machine)
{
return e_machine == EM_386 || e_machine == EM_X86_64;
}
struct rela_dyn {
GElf_Addr offset;
u32 sym_idx;
};
struct rela_dyn_info {
struct dso *dso;
Elf_Data *plt_got_data;
u32 nr_entries;
struct rela_dyn *sorted;
Elf_Data *dynsym_data;
Elf_Data *dynstr_data;
Elf_Data *rela_dyn_data;
};
static void exit_rela_dyn(struct rela_dyn_info *di)
{
zfree(&di->sorted);
}
static int cmp_offset(const void *a, const void *b)
{
const struct rela_dyn *va = a;
const struct rela_dyn *vb = b;
return va->offset < vb->offset ? -1 : (va->offset > vb->offset ? 1 : 0);
}
static int sort_rela_dyn(struct rela_dyn_info *di)
{
u32 i, n;
di->sorted = calloc(di->nr_entries, sizeof(di->sorted[0]));
if (!di->sorted)
return -1;
/* Get data for sorting: the offset and symbol index */
for (i = 0, n = 0; i < di->nr_entries; i++) {
GElf_Rela rela;
u32 sym_idx;
gelf_getrela(di->rela_dyn_data, i, &rela);
sym_idx = GELF_R_SYM(rela.r_info);
if (sym_idx) {
di->sorted[n].sym_idx = sym_idx;
di->sorted[n].offset = rela.r_offset;
n += 1;
}
}
/* Sort by offset */
di->nr_entries = n;
qsort(di->sorted, n, sizeof(di->sorted[0]), cmp_offset);
return 0;
}
static void get_rela_dyn_info(Elf *elf, GElf_Ehdr *ehdr, struct rela_dyn_info *di, Elf_Scn *scn)
{
GElf_Shdr rela_dyn_shdr;
GElf_Shdr shdr;
di->plt_got_data = elf_getdata(scn, NULL);
scn = elf_section_by_name(elf, ehdr, &rela_dyn_shdr, ".rela.dyn", NULL);
if (!scn || !rela_dyn_shdr.sh_link || !rela_dyn_shdr.sh_entsize)
return;
di->nr_entries = rela_dyn_shdr.sh_size / rela_dyn_shdr.sh_entsize;
di->rela_dyn_data = elf_getdata(scn, NULL);
scn = elf_getscn(elf, rela_dyn_shdr.sh_link);
if (!scn || !gelf_getshdr(scn, &shdr) || !shdr.sh_link)
return;
di->dynsym_data = elf_getdata(scn, NULL);
di->dynstr_data = elf_getdata(elf_getscn(elf, shdr.sh_link), NULL);
if (!di->plt_got_data || !di->dynstr_data || !di->dynsym_data || !di->rela_dyn_data)
return;
/* Sort into offset order */
sort_rela_dyn(di);
}
/* Get instruction displacement from a plt entry for x86_64 */
static u32 get_x86_64_plt_disp(const u8 *p)
{
u8 endbr64[] = {0xf3, 0x0f, 0x1e, 0xfa};
int n = 0;
/* Skip endbr64 */
if (!memcmp(p, endbr64, sizeof(endbr64)))
n += sizeof(endbr64);
/* Skip bnd prefix */
if (p[n] == 0xf2)
n += 1;
/* jmp with 4-byte displacement */
if (p[n] == 0xff && p[n + 1] == 0x25) {
u32 disp;
n += 2;
/* Also add offset from start of entry to end of instruction */
memcpy(&disp, p + n, sizeof(disp));
return n + 4 + le32toh(disp);
}
return 0;
}
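/*
 * For example, a typical x86_64 .plt.sec entry built with IBT might look
 * like:
 *
 *	f3 0f 1e fa		endbr64
 *	f2 ff 25 d5 2f 00 00	bnd jmp *0x2fd5(%rip)
 *
 * The displacement bytes start at offset 7 of the entry, so the function
 * above returns 7 + 4 + 0x2fd5 = 0x2fe0, i.e. the offset of the referenced
 * GOT slot relative to the start of the entry (RIP-relative addressing is
 * taken from the end of the jmp instruction, hence the extra 4).
 */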
static bool get_plt_got_name(GElf_Shdr *shdr, size_t i,
struct rela_dyn_info *di,
char *buf, size_t buf_sz)
{
struct rela_dyn vi, *vr;
const char *sym_name;
char *demangled;
GElf_Sym sym;
bool result;
u32 disp;
if (!di->sorted)
return false;
disp = get_x86_64_plt_disp(di->plt_got_data->d_buf + i);
if (!disp)
return false;
/* Compute target offset of the .plt.got entry */
vi.offset = shdr->sh_offset + di->plt_got_data->d_off + i + disp;
/* Find that offset in .rela.dyn (sorted by offset) */
vr = bsearch(&vi, di->sorted, di->nr_entries, sizeof(di->sorted[0]), cmp_offset);
if (!vr)
return false;
/* Get the associated symbol */
gelf_getsym(di->dynsym_data, vr->sym_idx, &sym);
sym_name = elf_sym__name(&sym, di->dynstr_data);
demangled = demangle_sym(di->dso, 0, sym_name);
if (demangled != NULL)
sym_name = demangled;
snprintf(buf, buf_sz, "%s@plt", sym_name);
result = *sym_name;
free(demangled);
return result;
}
static int dso__synthesize_plt_got_symbols(struct dso *dso, Elf *elf,
GElf_Ehdr *ehdr,
char *buf, size_t buf_sz)
{
struct rela_dyn_info di = { .dso = dso };
struct symbol *sym;
GElf_Shdr shdr;
Elf_Scn *scn;
int err = -1;
size_t i;
scn = elf_section_by_name(elf, ehdr, &shdr, ".plt.got", NULL);
if (!scn || !shdr.sh_entsize)
return 0;
if (ehdr->e_machine == EM_X86_64)
get_rela_dyn_info(elf, ehdr, &di, scn);
for (i = 0; i < shdr.sh_size; i += shdr.sh_entsize) {
if (!get_plt_got_name(&shdr, i, &di, buf, buf_sz))
snprintf(buf, buf_sz, "offset_%#" PRIx64 "@plt", (u64)shdr.sh_offset + i);
sym = symbol__new(shdr.sh_offset + i, shdr.sh_entsize, STB_GLOBAL, STT_FUNC, buf);
if (!sym)
goto out;
symbols__insert(&dso->symbols, sym);
}
err = 0;
out:
exit_rela_dyn(&di);
return err;
}
/*
* We need to check if we have a .dynsym, so that we can handle the
* .plt, synthesizing its symbols, that aren't on the symtabs (be it
* .dynsym or .symtab).
* And always look at the original dso, not at debuginfo packages, that
* have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
*/
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
{
uint32_t idx;
GElf_Sym sym;
u64 plt_offset, plt_header_size, plt_entry_size;
GElf_Shdr shdr_plt, plt_sec_shdr;
struct symbol *f, *plt_sym;
GElf_Shdr shdr_rel_plt, shdr_dynsym;
Elf_Data *syms, *symstrs;
Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
GElf_Ehdr ehdr;
char sympltname[1024];
Elf *elf;
int nr = 0, err = -1;
struct rel_info ri = { .is_rela = false };
bool lazy_plt;
elf = ss->elf;
ehdr = ss->ehdr;
if (!elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL))
return 0;
/*
* A symbol from a previous section (e.g. .init) can have been expanded
* by symbols__fixup_end() to overlap .plt. Truncate it before adding
* a symbol for .plt header.
*/
f = dso__find_symbol_nocache(dso, shdr_plt.sh_offset);
if (f && f->start < shdr_plt.sh_offset && f->end > shdr_plt.sh_offset)
f->end = shdr_plt.sh_offset;
if (!get_plt_sizes(dso, &ehdr, &shdr_plt, &plt_header_size, &plt_entry_size))
return 0;
/* Add a symbol for .plt header */
plt_sym = symbol__new(shdr_plt.sh_offset, plt_header_size, STB_GLOBAL, STT_FUNC, ".plt");
if (!plt_sym)
goto out_elf_end;
symbols__insert(&dso->symbols, plt_sym);
/* Only x86 has .plt.got */
if (machine_is_x86(ehdr.e_machine) &&
dso__synthesize_plt_got_symbols(dso, elf, &ehdr, sympltname, sizeof(sympltname)))
goto out_elf_end;
/* Only x86 has .plt.sec */
if (machine_is_x86(ehdr.e_machine) &&
elf_section_by_name(elf, &ehdr, &plt_sec_shdr, ".plt.sec", NULL)) {
if (!get_plt_sizes(dso, &ehdr, &plt_sec_shdr, &plt_header_size, &plt_entry_size))
return 0;
/* Extend .plt symbol to entire .plt */
plt_sym->end = plt_sym->start + shdr_plt.sh_size;
/* Use .plt.sec offset */
plt_offset = plt_sec_shdr.sh_offset;
lazy_plt = false;
} else {
plt_offset = shdr_plt.sh_offset;
lazy_plt = true;
}
scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
".rela.plt", NULL);
if (scn_plt_rel == NULL) {
scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
".rel.plt", NULL);
if (scn_plt_rel == NULL)
return 0;
}
if (shdr_rel_plt.sh_type != SHT_RELA &&
shdr_rel_plt.sh_type != SHT_REL)
return 0;
if (!shdr_rel_plt.sh_link)
return 0;
if (shdr_rel_plt.sh_link == ss->dynsym_idx) {
scn_dynsym = ss->dynsym;
shdr_dynsym = ss->dynshdr;
} else if (shdr_rel_plt.sh_link == ss->symtab_idx) {
/*
* A static executable can have a .plt due to IFUNCs, in which
* case .symtab is used not .dynsym.
*/
scn_dynsym = ss->symtab;
shdr_dynsym = ss->symshdr;
} else {
goto out_elf_end;
}
if (!scn_dynsym)
return 0;
/*
* Fetch the relocation section to find the idxes to the GOT
* and the symbols in the .dynsym they refer to.
*/
ri.reldata = elf_getdata(scn_plt_rel, NULL);
if (!ri.reldata)
goto out_elf_end;
syms = elf_getdata(scn_dynsym, NULL);
if (syms == NULL)
goto out_elf_end;
scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
if (scn_symstrs == NULL)
goto out_elf_end;
symstrs = elf_getdata(scn_symstrs, NULL);
if (symstrs == NULL)
goto out_elf_end;
if (symstrs->d_size == 0)
goto out_elf_end;
ri.nr_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
ri.is_rela = shdr_rel_plt.sh_type == SHT_RELA;
if (lazy_plt) {
/*
* Assume a .plt with the same number of entries as the number
* of relocation entries is not lazy and does not have a header.
*/
if (ri.nr_entries * plt_entry_size == shdr_plt.sh_size)
dso__delete_symbol(dso, plt_sym);
else
plt_offset += plt_header_size;
}
/*
* x86 doesn't insert IFUNC relocations in .plt order, so sort to get
* back in order.
*/
if (machine_is_x86(ehdr.e_machine) && sort_rel(&ri))
goto out_elf_end;
for (idx = 0; idx < ri.nr_entries; idx++) {
const char *elf_name = NULL;
char *demangled = NULL;
gelf_getsym(syms, get_rel_symidx(&ri, idx), &sym);
elf_name = elf_sym__name(&sym, symstrs);
demangled = demangle_sym(dso, 0, elf_name);
if (demangled)
elf_name = demangled;
if (*elf_name)
snprintf(sympltname, sizeof(sympltname), "%s@plt", elf_name);
else if (!get_ifunc_name(elf, dso, &ehdr, &ri, sympltname, sizeof(sympltname)))
snprintf(sympltname, sizeof(sympltname),
"offset_%#" PRIx64 "@plt", plt_offset);
free(demangled);
f = symbol__new(plt_offset, plt_entry_size, STB_GLOBAL, STT_FUNC, sympltname);
if (!f)
goto out_elf_end;
plt_offset += plt_entry_size;
symbols__insert(&dso->symbols, f);
++nr;
}
err = 0;
out_elf_end:
exit_rel(&ri);
if (err == 0)
return nr;
pr_debug("%s: problems reading %s PLT info.\n",
__func__, dso->long_name);
return 0;
}
char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
{
return demangle_sym(dso, kmodule, elf_name);
}
/*
* Align offset to 4 bytes as needed for note name and descriptor data.
*/
#define NOTE_ALIGN(n) (((n) + 3) & -4U)
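/*
 * E.g. NOTE_ALIGN(5) == 8 and NOTE_ALIGN(8) == 8: adding 3 and masking off
 * the low two bits rounds n up to the next multiple of 4, which is how the
 * name and descriptor fields are laid out in ELF notes.
 */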
static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
int err = -1;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
Elf_Data *data;
Elf_Scn *sec;
Elf_Kind ek;
void *ptr;
if (size < BUILD_ID_SIZE)
goto out;
ek = elf_kind(elf);
if (ek != ELF_K_ELF)
goto out;
if (gelf_getehdr(elf, &ehdr) == NULL) {
pr_err("%s: cannot get elf header.\n", __func__);
goto out;
}
/*
* Check following sections for notes:
* '.note.gnu.build-id'
* '.notes'
* '.note' (VDSO specific)
*/
do {
sec = elf_section_by_name(elf, &ehdr, &shdr,
".note.gnu.build-id", NULL);
if (sec)
break;
sec = elf_section_by_name(elf, &ehdr, &shdr,
".notes", NULL);
if (sec)
break;
sec = elf_section_by_name(elf, &ehdr, &shdr,
".note", NULL);
if (sec)
break;
return err;
} while (0);
data = elf_getdata(sec, NULL);
if (data == NULL)
goto out;
ptr = data->d_buf;
while (ptr < (data->d_buf + data->d_size)) {
GElf_Nhdr *nhdr = ptr;
size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
descsz = NOTE_ALIGN(nhdr->n_descsz);
const char *name;
ptr += sizeof(*nhdr);
name = ptr;
ptr += namesz;
if (nhdr->n_type == NT_GNU_BUILD_ID &&
nhdr->n_namesz == sizeof("GNU")) {
if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
size_t sz = min(size, descsz);
memcpy(bf, ptr, sz);
memset(bf + sz, 0, size - sz);
err = sz;
break;
}
}
ptr += descsz;
}
out:
return err;
}
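/*
 * A GNU build-id note, as found in .note.gnu.build-id, typically looks like:
 *
 *	n_namesz = 4	("GNU" plus NUL)
 *	n_descsz = 20	(SHA-1 build-id)
 *	n_type   = 3	(NT_GNU_BUILD_ID)
 *
 * followed by the name and the descriptor, each padded to a 4-byte boundary,
 * which is why the loop above advances by NOTE_ALIGN()ed sizes.
 */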
#ifdef HAVE_LIBBFD_BUILDID_SUPPORT
static int read_build_id(const char *filename, struct build_id *bid)
{
size_t size = sizeof(bid->data);
int err = -1;
bfd *abfd;
abfd = bfd_openr(filename, NULL);
if (!abfd)
return -1;
if (!bfd_check_format(abfd, bfd_object)) {
pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
goto out_close;
}
if (!abfd->build_id || abfd->build_id->size > size)
goto out_close;
memcpy(bid->data, abfd->build_id->data, abfd->build_id->size);
memset(bid->data + abfd->build_id->size, 0, size - abfd->build_id->size);
err = bid->size = abfd->build_id->size;
out_close:
bfd_close(abfd);
return err;
}
#else // HAVE_LIBBFD_BUILDID_SUPPORT
static int read_build_id(const char *filename, struct build_id *bid)
{
size_t size = sizeof(bid->data);
int fd, err = -1;
Elf *elf;
if (size < BUILD_ID_SIZE)
goto out;
fd = open(filename, O_RDONLY);
if (fd < 0)
goto out;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL) {
pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
goto out_close;
}
err = elf_read_build_id(elf, bid->data, size);
if (err > 0)
bid->size = err;
elf_end(elf);
out_close:
close(fd);
out:
return err;
}
#endif // HAVE_LIBBFD_BUILDID_SUPPORT
int filename__read_build_id(const char *filename, struct build_id *bid)
{
struct kmod_path m = { .name = NULL, };
char path[PATH_MAX];
int err;
if (!filename)
return -EFAULT;
err = kmod_path__parse(&m, filename);
if (err)
return -1;
if (m.comp) {
int error = 0, fd;
fd = filename__decompress(filename, path, sizeof(path), m.comp, &error);
if (fd < 0) {
pr_debug("Failed to decompress (error %d) %s\n",
error, filename);
return -1;
}
close(fd);
filename = path;
}
err = read_build_id(filename, bid);
if (m.comp)
unlink(filename);
return err;
}
int sysfs__read_build_id(const char *filename, struct build_id *bid)
{
size_t size = sizeof(bid->data);
int fd, err = -1;
fd = open(filename, O_RDONLY);
if (fd < 0)
goto out;
while (1) {
char bf[BUFSIZ];
GElf_Nhdr nhdr;
size_t namesz, descsz;
if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
break;
namesz = NOTE_ALIGN(nhdr.n_namesz);
descsz = NOTE_ALIGN(nhdr.n_descsz);
if (nhdr.n_type == NT_GNU_BUILD_ID &&
nhdr.n_namesz == sizeof("GNU")) {
if (read(fd, bf, namesz) != (ssize_t)namesz)
break;
if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
size_t sz = min(descsz, size);
if (read(fd, bid->data, sz) == (ssize_t)sz) {
memset(bid->data + sz, 0, size - sz);
bid->size = sz;
err = 0;
break;
}
} else if (read(fd, bf, descsz) != (ssize_t)descsz)
break;
} else {
int n = namesz + descsz;
if (n > (int)sizeof(bf)) {
n = sizeof(bf);
pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
__func__, filename, nhdr.n_namesz, nhdr.n_descsz);
}
if (read(fd, bf, n) != n)
break;
}
}
close(fd);
out:
return err;
}
#ifdef HAVE_LIBBFD_SUPPORT
int filename__read_debuglink(const char *filename, char *debuglink,
size_t size)
{
int err = -1;
asection *section;
bfd *abfd;
abfd = bfd_openr(filename, NULL);
if (!abfd)
return -1;
if (!bfd_check_format(abfd, bfd_object)) {
pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
goto out_close;
}
section = bfd_get_section_by_name(abfd, ".gnu_debuglink");
if (!section)
goto out_close;
if (section->size > size)
goto out_close;
if (!bfd_get_section_contents(abfd, section, debuglink, 0,
section->size))
goto out_close;
err = 0;
out_close:
bfd_close(abfd);
return err;
}
#else
int filename__read_debuglink(const char *filename, char *debuglink,
size_t size)
{
int fd, err = -1;
Elf *elf;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
Elf_Data *data;
Elf_Scn *sec;
Elf_Kind ek;
fd = open(filename, O_RDONLY);
if (fd < 0)
goto out;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL) {
pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
goto out_close;
}
ek = elf_kind(elf);
if (ek != ELF_K_ELF)
goto out_elf_end;
if (gelf_getehdr(elf, &ehdr) == NULL) {
pr_err("%s: cannot get elf header.\n", __func__);
goto out_elf_end;
}
sec = elf_section_by_name(elf, &ehdr, &shdr,
".gnu_debuglink", NULL);
if (sec == NULL)
goto out_elf_end;
data = elf_getdata(sec, NULL);
if (data == NULL)
goto out_elf_end;
/* the start of this section is a zero-terminated string */
strncpy(debuglink, data->d_buf, size);
err = 0;
out_elf_end:
elf_end(elf);
out_close:
close(fd);
out:
return err;
}
#endif
static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
static unsigned int const endian = 1;
dso->needs_swap = DSO_SWAP__NO;
switch (eidata) {
case ELFDATA2LSB:
/* We are big endian, DSO is little endian. */
if (*(unsigned char const *)&endian != 1)
dso->needs_swap = DSO_SWAP__YES;
break;
case ELFDATA2MSB:
/* We are little endian, DSO is big endian. */
if (*(unsigned char const *)&endian != 0)
dso->needs_swap = DSO_SWAP__YES;
break;
default:
pr_err("unrecognized DSO data encoding %d\n", eidata);
return -EINVAL;
}
return 0;
}
bool symsrc__possibly_runtime(struct symsrc *ss)
{
return ss->dynsym || ss->opdsec;
}
bool symsrc__has_symtab(struct symsrc *ss)
{
return ss->symtab != NULL;
}
void symsrc__destroy(struct symsrc *ss)
{
zfree(&ss->name);
elf_end(ss->elf);
close(ss->fd);
}
bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
/*
	 * Usually vmlinux is an ELF file of type ET_EXEC on most
	 * architectures; the arm64 kernel, however, is linked with the
	 * '-shared' option, so type ET_DYN also needs to be accepted.
*/
return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL ||
ehdr.e_type == ET_DYN;
}
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
enum dso_binary_type type)
{
GElf_Ehdr ehdr;
Elf *elf;
int fd;
if (dso__needs_decompress(dso)) {
fd = dso__decompress_kmodule_fd(dso, name);
if (fd < 0)
return -1;
type = dso->symtab_type;
} else {
fd = open(name, O_RDONLY);
if (fd < 0) {
dso->load_errno = errno;
return -1;
}
}
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL) {
pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
goto out_close;
}
if (gelf_getehdr(elf, &ehdr) == NULL) {
dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
pr_debug("%s: cannot get elf header.\n", __func__);
goto out_elf_end;
}
if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
goto out_elf_end;
}
/* Always reject images with a mismatched build-id: */
if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) {
u8 build_id[BUILD_ID_SIZE];
struct build_id bid;
int size;
size = elf_read_build_id(elf, build_id, BUILD_ID_SIZE);
if (size <= 0) {
dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
goto out_elf_end;
}
build_id__init(&bid, build_id, size);
if (!dso__build_id_equal(dso, &bid)) {
pr_debug("%s: build id mismatch for %s.\n", __func__, name);
dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
goto out_elf_end;
}
}
ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
ss->symtab_idx = 0;
ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
&ss->symtab_idx);
if (ss->symshdr.sh_type != SHT_SYMTAB)
ss->symtab = NULL;
ss->dynsym_idx = 0;
ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
&ss->dynsym_idx);
if (ss->dynshdr.sh_type != SHT_DYNSYM)
ss->dynsym = NULL;
ss->opdidx = 0;
ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
&ss->opdidx);
if (ss->opdshdr.sh_type != SHT_PROGBITS)
ss->opdsec = NULL;
if (dso->kernel == DSO_SPACE__USER)
ss->adjust_symbols = true;
else
ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
ss->name = strdup(name);
if (!ss->name) {
dso->load_errno = errno;
goto out_elf_end;
}
ss->elf = elf;
ss->fd = fd;
ss->ehdr = ehdr;
ss->type = type;
return 0;
out_elf_end:
elf_end(elf);
out_close:
close(fd);
return -1;
}
/**
* ref_reloc_sym_not_found - has kernel relocation symbol been found.
* @kmap: kernel maps and relocation reference symbol
*
* This function returns %true if we are dealing with the kernel maps and the
* relocation reference symbol has not yet been found. Otherwise %false is
* returned.
*/
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
!kmap->ref_reloc_sym->unrelocated_addr;
}
/**
* ref_reloc - kernel relocation offset.
* @kmap: kernel maps and relocation reference symbol
*
* This function returns the offset of kernel addresses as determined by using
* the relocation reference symbol i.e. if the kernel has not been relocated
* then the return value is zero.
*/
static u64 ref_reloc(struct kmap *kmap)
{
if (kmap && kmap->ref_reloc_sym &&
kmap->ref_reloc_sym->unrelocated_addr)
return kmap->ref_reloc_sym->addr -
kmap->ref_reloc_sym->unrelocated_addr;
return 0;
}
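/*
 * For example, with KASLR, if the reference symbol (e.g. _text) is at
 * 0xffffffff9e200000 according to kallsyms while its unrelocated address in
 * vmlinux is 0xffffffff81000000, ref_reloc() returns 0x1d200000, the offset
 * to apply so that file addresses match the running kernel.
 */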
void __weak arch__sym_update(struct symbol *s __maybe_unused,
GElf_Sym *sym __maybe_unused) { }
static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
GElf_Sym *sym, GElf_Shdr *shdr,
struct maps *kmaps, struct kmap *kmap,
struct dso **curr_dsop, struct map **curr_mapp,
const char *section_name,
bool adjust_kernel_syms, bool kmodule, bool *remap_kernel)
{
struct dso *curr_dso = *curr_dsop;
struct map *curr_map;
char dso_name[PATH_MAX];
/* Adjust symbol to map to file offset */
if (adjust_kernel_syms)
sym->st_value -= shdr->sh_addr - shdr->sh_offset;
if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0)
return 0;
if (strcmp(section_name, ".text") == 0) {
/*
* The initial kernel mapping is based on
* kallsyms and identity maps. Overwrite it to
* map to the kernel dso.
*/
if (*remap_kernel && dso->kernel && !kmodule) {
*remap_kernel = false;
map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
map__set_end(map, map__start(map) + shdr->sh_size);
map__set_pgoff(map, shdr->sh_offset);
map__set_map_ip(map, map__dso_map_ip);
map__set_unmap_ip(map, map__dso_unmap_ip);
/* Ensure maps are correctly ordered */
if (kmaps) {
int err;
struct map *tmp = map__get(map);
maps__remove(kmaps, map);
err = maps__insert(kmaps, map);
map__put(tmp);
if (err)
return err;
}
}
/*
* The initial module mapping is based on
* /proc/modules mapped to offset zero.
* Overwrite it to map to the module dso.
*/
if (*remap_kernel && kmodule) {
*remap_kernel = false;
map__set_pgoff(map, shdr->sh_offset);
}
*curr_mapp = map;
*curr_dsop = dso;
return 0;
}
if (!kmap)
return 0;
snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name);
curr_map = maps__find_by_name(kmaps, dso_name);
if (curr_map == NULL) {
u64 start = sym->st_value;
if (kmodule)
start += map__start(map) + shdr->sh_offset;
curr_dso = dso__new(dso_name);
if (curr_dso == NULL)
return -1;
curr_dso->kernel = dso->kernel;
curr_dso->long_name = dso->long_name;
curr_dso->long_name_len = dso->long_name_len;
curr_dso->binary_type = dso->binary_type;
curr_dso->adjust_symbols = dso->adjust_symbols;
curr_map = map__new2(start, curr_dso);
dso__put(curr_dso);
if (curr_map == NULL)
return -1;
if (curr_dso->kernel)
map__kmap(curr_map)->kmaps = kmaps;
if (adjust_kernel_syms) {
map__set_start(curr_map, shdr->sh_addr + ref_reloc(kmap));
map__set_end(curr_map, map__start(curr_map) + shdr->sh_size);
map__set_pgoff(curr_map, shdr->sh_offset);
} else {
map__set_map_ip(curr_map, identity__map_ip);
map__set_unmap_ip(curr_map, identity__map_ip);
}
curr_dso->symtab_type = dso->symtab_type;
if (maps__insert(kmaps, curr_map))
return -1;
/*
* Add it before we drop the reference to curr_map, i.e. while
* we still are sure to have a reference to this DSO via
* *curr_map->dso.
*/
dsos__add(&maps__machine(kmaps)->dsos, curr_dso);
/* kmaps already got it */
map__put(curr_map);
dso__set_loaded(curr_dso);
*curr_mapp = curr_map;
*curr_dsop = curr_dso;
} else
*curr_dsop = map__dso(curr_map);
return 0;
}
static int
dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
struct symsrc *runtime_ss, int kmodule, int dynsym)
{
struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
struct maps *kmaps = kmap ? map__kmaps(map) : NULL;
struct map *curr_map = map;
struct dso *curr_dso = dso;
Elf_Data *symstrs, *secstrs, *secstrs_run, *secstrs_sym;
uint32_t nr_syms;
int err = -1;
uint32_t idx;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
GElf_Shdr tshdr;
Elf_Data *syms, *opddata = NULL;
GElf_Sym sym;
Elf_Scn *sec, *sec_strndx;
Elf *elf;
int nr = 0;
bool remap_kernel = false, adjust_kernel_syms = false;
if (kmap && !kmaps)
return -1;
elf = syms_ss->elf;
ehdr = syms_ss->ehdr;
if (dynsym) {
sec = syms_ss->dynsym;
shdr = syms_ss->dynshdr;
} else {
sec = syms_ss->symtab;
shdr = syms_ss->symshdr;
}
if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
".text", NULL))
dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
if (runtime_ss->opdsec)
opddata = elf_rawdata(runtime_ss->opdsec, NULL);
syms = elf_getdata(sec, NULL);
if (syms == NULL)
goto out_elf_end;
sec = elf_getscn(elf, shdr.sh_link);
if (sec == NULL)
goto out_elf_end;
symstrs = elf_getdata(sec, NULL);
if (symstrs == NULL)
goto out_elf_end;
sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
if (sec_strndx == NULL)
goto out_elf_end;
secstrs_run = elf_getdata(sec_strndx, NULL);
if (secstrs_run == NULL)
goto out_elf_end;
sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
if (sec_strndx == NULL)
goto out_elf_end;
secstrs_sym = elf_getdata(sec_strndx, NULL);
if (secstrs_sym == NULL)
goto out_elf_end;
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
/*
* The kernel relocation symbol is needed in advance in order to adjust
* kernel maps correctly.
*/
if (ref_reloc_sym_not_found(kmap)) {
elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
const char *elf_name = elf_sym__name(&sym, symstrs);
if (strcmp(elf_name, kmap->ref_reloc_sym->name))
continue;
kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
map__set_reloc(map, kmap->ref_reloc_sym->addr - kmap->ref_reloc_sym->unrelocated_addr);
break;
}
}
/*
* Handle any relocation of vdso necessary because older kernels
* attempted to prelink vdso to its virtual address.
*/
if (dso__is_vdso(dso))
map__set_reloc(map, map__start(map) - dso->text_offset);
dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
/*
* Initial kernel and module mappings do not map to the dso.
* Flag the fixups.
*/
if (dso->kernel) {
remap_kernel = true;
adjust_kernel_syms = dso->adjust_symbols;
}
elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
struct symbol *f;
const char *elf_name = elf_sym__name(&sym, symstrs);
char *demangled = NULL;
int is_label = elf_sym__is_label(&sym);
const char *section_name;
bool used_opd = false;
if (!is_label && !elf_sym__filter(&sym))
continue;
/* Reject ARM ELF "mapping symbols": these aren't unique and
* don't identify functions, so will confuse the profile
* output: */
if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
&& (elf_name[2] == '\0' || elf_name[2] == '.'))
continue;
}
if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
u64 *opd = opddata->d_buf + offset;
sym.st_value = DSO__SWAP(dso, u64, *opd);
sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
sym.st_value);
used_opd = true;
}
/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have the value SHN_ABS in their st_shndx) fail at
		 * elf_getscn(), and that marks the loading as a failure, so
		 * already loaded symbols cannot be fixed up.
*
* I'm not sure what should be done. Just ignore them for now.
* - Namhyung Kim
*/
if (sym.st_shndx == SHN_ABS)
continue;
sec = elf_getscn(syms_ss->elf, sym.st_shndx);
if (!sec)
goto out_elf_end;
gelf_getshdr(sec, &shdr);
/*
* If the attribute bit SHF_ALLOC is not set, the section
* doesn't occupy memory during process execution.
* E.g. ".gnu.warning.*" section is used by linker to generate
* warnings when calling deprecated functions, the symbols in
* the section aren't loaded to memory during process execution,
* so skip them.
*/
if (!(shdr.sh_flags & SHF_ALLOC))
continue;
secstrs = secstrs_sym;
/*
* We have to fallback to runtime when syms' section header has
* NOBITS set. NOBITS results in file offset (sh_offset) not
* being incremented. So sh_offset used below has different
* values for syms (invalid) and runtime (valid).
*/
if (shdr.sh_type == SHT_NOBITS) {
sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
if (!sec)
goto out_elf_end;
gelf_getshdr(sec, &shdr);
secstrs = secstrs_run;
}
if (is_label && !elf_sec__filter(&shdr, secstrs))
continue;
section_name = elf_sec__name(&shdr, secstrs);
/* On ARM, symbols for thumb functions have 1 added to
* the symbol address as a flag - remove it */
if ((ehdr.e_machine == EM_ARM) &&
(GELF_ST_TYPE(sym.st_info) == STT_FUNC) &&
(sym.st_value & 1))
--sym.st_value;
if (dso->kernel) {
if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map,
section_name, adjust_kernel_syms, kmodule, &remap_kernel))
goto out_elf_end;
} else if ((used_opd && runtime_ss->adjust_symbols) ||
(!used_opd && syms_ss->adjust_symbols)) {
GElf_Phdr phdr;
if (elf_read_program_header(runtime_ss->elf,
(u64)sym.st_value, &phdr)) {
pr_debug4("%s: failed to find program header for "
"symbol: %s st_value: %#" PRIx64 "\n",
__func__, elf_name, (u64)sym.st_value);
pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
"sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n",
__func__, (u64)sym.st_value, (u64)shdr.sh_addr,
(u64)shdr.sh_offset);
/*
* Fail to find program header, let's rollback
* to use shdr.sh_addr and shdr.sh_offset to
* calibrate symbol's file address, though this
* is not necessary for normal C ELF file, we
* still need to handle java JIT symbols in this
* case.
*/
sym.st_value -= shdr.sh_addr - shdr.sh_offset;
} else {
pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
"p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
__func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
(u64)phdr.p_offset);
sym.st_value -= phdr.p_vaddr - phdr.p_offset;
}
}
demangled = demangle_sym(dso, kmodule, elf_name);
if (demangled != NULL)
elf_name = demangled;
f = symbol__new(sym.st_value, sym.st_size,
GELF_ST_BIND(sym.st_info),
GELF_ST_TYPE(sym.st_info), elf_name);
free(demangled);
if (!f)
goto out_elf_end;
arch__sym_update(f, &sym);
__symbols__insert(&curr_dso->symbols, f, dso->kernel);
nr++;
}
/*
* For misannotated, zeroed, ASM function sizes.
*/
if (nr > 0) {
symbols__fixup_end(&dso->symbols, false);
symbols__fixup_duplicate(&dso->symbols);
if (kmap) {
/*
* We need to fixup this here too because we create new
* maps here, for things like vsyscall sections.
*/
maps__fixup_end(kmaps);
}
}
err = nr;
out_elf_end:
return err;
}
int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
struct symsrc *runtime_ss, int kmodule)
{
int nr = 0;
int err = -1;
dso->symtab_type = syms_ss->type;
dso->is_64_bit = syms_ss->is_64_bit;
dso->rel = syms_ss->ehdr.e_type == ET_REL;
/*
* Modules may already have symbols from kallsyms, but those symbols
* have the wrong values for the dso maps, so remove them.
*/
if (kmodule && syms_ss->symtab)
symbols__delete(&dso->symbols);
if (!syms_ss->symtab) {
/*
* If the vmlinux is stripped, fail so we will fall back
* to using kallsyms. The vmlinux runtime symbols aren't
* of much use.
*/
if (dso->kernel)
return err;
} else {
err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
kmodule, 0);
if (err < 0)
return err;
nr = err;
}
if (syms_ss->dynsym) {
err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
kmodule, 1);
if (err < 0)
return err;
err += nr;
}
return err;
}
static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
GElf_Phdr phdr;
size_t i, phdrnum;
int err;
u64 sz;
if (elf_getphdrnum(elf, &phdrnum))
return -1;
for (i = 0; i < phdrnum; i++) {
if (gelf_getphdr(elf, i, &phdr) == NULL)
return -1;
if (phdr.p_type != PT_LOAD)
continue;
if (exe) {
if (!(phdr.p_flags & PF_X))
continue;
} else {
if (!(phdr.p_flags & PF_R))
continue;
}
sz = min(phdr.p_memsz, phdr.p_filesz);
if (!sz)
continue;
err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
if (err)
return err;
}
return 0;
}
int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
bool *is_64_bit)
{
int err;
Elf *elf;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL)
return -1;
if (is_64_bit)
*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
err = elf_read_maps(elf, exe, mapfn, data);
elf_end(elf);
return err;
}
enum dso_type dso__type_fd(int fd)
{
enum dso_type dso_type = DSO__TYPE_UNKNOWN;
GElf_Ehdr ehdr;
Elf_Kind ek;
Elf *elf;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL)
goto out;
ek = elf_kind(elf);
if (ek != ELF_K_ELF)
goto out_end;
if (gelf_getclass(elf) == ELFCLASS64) {
dso_type = DSO__TYPE_64BIT;
goto out_end;
}
if (gelf_getehdr(elf, &ehdr) == NULL)
goto out_end;
if (ehdr.e_machine == EM_X86_64)
dso_type = DSO__TYPE_X32BIT;
else
dso_type = DSO__TYPE_32BIT;
out_end:
elf_end(elf);
out:
return dso_type;
}
static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
ssize_t r;
size_t n;
int err = -1;
char *buf = malloc(page_size);
if (buf == NULL)
return -1;
if (lseek(to, to_offs, SEEK_SET) != to_offs)
goto out;
if (lseek(from, from_offs, SEEK_SET) != from_offs)
goto out;
while (len) {
n = page_size;
if (len < n)
n = len;
/* Use read because mmap won't work on proc files */
r = read(from, buf, n);
if (r < 0)
goto out;
if (!r)
break;
n = r;
r = write(to, buf, n);
if (r < 0)
goto out;
if ((size_t)r != n)
goto out;
len -= n;
}
err = 0;
out:
free(buf);
return err;
}
struct kcore {
int fd;
int elfclass;
Elf *elf;
GElf_Ehdr ehdr;
};
static int kcore__open(struct kcore *kcore, const char *filename)
{
GElf_Ehdr *ehdr;
kcore->fd = open(filename, O_RDONLY);
if (kcore->fd == -1)
return -1;
kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
if (!kcore->elf)
goto out_close;
kcore->elfclass = gelf_getclass(kcore->elf);
if (kcore->elfclass == ELFCLASSNONE)
goto out_end;
ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
if (!ehdr)
goto out_end;
return 0;
out_end:
elf_end(kcore->elf);
out_close:
close(kcore->fd);
return -1;
}
static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
bool temp)
{
kcore->elfclass = elfclass;
if (temp)
kcore->fd = mkstemp(filename);
else
kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
if (kcore->fd == -1)
return -1;
kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
if (!kcore->elf)
goto out_close;
if (!gelf_newehdr(kcore->elf, elfclass))
goto out_end;
memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
return 0;
out_end:
elf_end(kcore->elf);
out_close:
close(kcore->fd);
unlink(filename);
return -1;
}
static void kcore__close(struct kcore *kcore)
{
elf_end(kcore->elf);
close(kcore->fd);
}
static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
GElf_Ehdr *ehdr = &to->ehdr;
GElf_Ehdr *kehdr = &from->ehdr;
memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
ehdr->e_type = kehdr->e_type;
ehdr->e_machine = kehdr->e_machine;
ehdr->e_version = kehdr->e_version;
ehdr->e_entry = 0;
ehdr->e_shoff = 0;
ehdr->e_flags = kehdr->e_flags;
ehdr->e_phnum = count;
ehdr->e_shentsize = 0;
ehdr->e_shnum = 0;
ehdr->e_shstrndx = 0;
if (from->elfclass == ELFCLASS32) {
ehdr->e_phoff = sizeof(Elf32_Ehdr);
ehdr->e_ehsize = sizeof(Elf32_Ehdr);
ehdr->e_phentsize = sizeof(Elf32_Phdr);
} else {
ehdr->e_phoff = sizeof(Elf64_Ehdr);
ehdr->e_ehsize = sizeof(Elf64_Ehdr);
ehdr->e_phentsize = sizeof(Elf64_Phdr);
}
if (!gelf_update_ehdr(to->elf, ehdr))
return -1;
if (!gelf_newphdr(to->elf, count))
return -1;
return 0;
}
static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
u64 addr, u64 len)
{
GElf_Phdr phdr = {
.p_type = PT_LOAD,
.p_flags = PF_R | PF_W | PF_X,
.p_offset = offset,
.p_vaddr = addr,
.p_paddr = 0,
.p_filesz = len,
.p_memsz = len,
.p_align = page_size,
};
if (!gelf_update_phdr(kcore->elf, idx, &phdr))
return -1;
return 0;
}
static off_t kcore__write(struct kcore *kcore)
{
return elf_update(kcore->elf, ELF_C_WRITE);
}
struct phdr_data {
off_t offset;
off_t rel;
u64 addr;
u64 len;
struct list_head node;
struct phdr_data *remaps;
};
struct sym_data {
u64 addr;
struct list_head node;
};
struct kcore_copy_info {
u64 stext;
u64 etext;
u64 first_symbol;
u64 last_symbol;
u64 first_module;
u64 first_module_symbol;
u64 last_module_symbol;
size_t phnum;
struct list_head phdrs;
struct list_head syms;
};
#define kcore_copy__for_each_phdr(k, p) \
list_for_each_entry((p), &(k)->phdrs, node)
static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset)
{
struct phdr_data *p = zalloc(sizeof(*p));
if (p) {
p->addr = addr;
p->len = len;
p->offset = offset;
}
return p;
}
static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci,
u64 addr, u64 len,
off_t offset)
{
struct phdr_data *p = phdr_data__new(addr, len, offset);
if (p)
list_add_tail(&p->node, &kci->phdrs);
return p;
}
static void kcore_copy__free_phdrs(struct kcore_copy_info *kci)
{
struct phdr_data *p, *tmp;
list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
list_del_init(&p->node);
free(p);
}
}
static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci,
u64 addr)
{
struct sym_data *s = zalloc(sizeof(*s));
if (s) {
s->addr = addr;
list_add_tail(&s->node, &kci->syms);
}
return s;
}
static void kcore_copy__free_syms(struct kcore_copy_info *kci)
{
struct sym_data *s, *tmp;
list_for_each_entry_safe(s, tmp, &kci->syms, node) {
list_del_init(&s->node);
free(s);
}
}
static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
u64 start)
{
struct kcore_copy_info *kci = arg;
if (!kallsyms__is_function(type))
return 0;
if (strchr(name, '[')) {
if (!kci->first_module_symbol || start < kci->first_module_symbol)
kci->first_module_symbol = start;
if (start > kci->last_module_symbol)
kci->last_module_symbol = start;
return 0;
}
if (!kci->first_symbol || start < kci->first_symbol)
kci->first_symbol = start;
if (!kci->last_symbol || start > kci->last_symbol)
kci->last_symbol = start;
if (!strcmp(name, "_stext")) {
kci->stext = start;
return 0;
}
if (!strcmp(name, "_etext")) {
kci->etext = start;
return 0;
}
if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start))
return -1;
return 0;
}
static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
const char *dir)
{
char kallsyms_filename[PATH_MAX];
scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
return -1;
if (kallsyms__parse(kallsyms_filename, kci,
kcore_copy__process_kallsyms) < 0)
return -1;
return 0;
}
static int kcore_copy__process_modules(void *arg,
const char *name __maybe_unused,
u64 start, u64 size __maybe_unused)
{
struct kcore_copy_info *kci = arg;
if (!kci->first_module || start < kci->first_module)
kci->first_module = start;
return 0;
}
static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
const char *dir)
{
char modules_filename[PATH_MAX];
scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
if (symbol__restricted_filename(modules_filename, "/proc/modules"))
return -1;
if (modules__parse(modules_filename, kci,
kcore_copy__process_modules) < 0)
return -1;
return 0;
}
static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end,
u64 pgoff, u64 s, u64 e)
{
u64 len, offset;
if (s < start || s >= end)
return 0;
offset = (s - start) + pgoff;
len = e < end ? e - s : end - s;
return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1;
}
static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
struct kcore_copy_info *kci = data;
u64 end = start + len;
struct sym_data *sdat;
if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext))
return -1;
if (kcore_copy__map(kci, start, end, pgoff, kci->first_module,
kci->last_module_symbol))
return -1;
list_for_each_entry(sdat, &kci->syms, node) {
u64 s = round_down(sdat->addr, page_size);
if (kcore_copy__map(kci, start, end, pgoff, s, s + len))
return -1;
}
return 0;
}
static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
return -1;
return 0;
}
static void kcore_copy__find_remaps(struct kcore_copy_info *kci)
{
struct phdr_data *p, *k = NULL;
u64 kend;
if (!kci->stext)
return;
/* Find phdr that corresponds to the kernel map (contains stext) */
kcore_copy__for_each_phdr(kci, p) {
u64 pend = p->addr + p->len - 1;
if (p->addr <= kci->stext && pend >= kci->stext) {
k = p;
break;
}
}
if (!k)
return;
kend = k->offset + k->len;
/* Find phdrs that remap the kernel */
kcore_copy__for_each_phdr(kci, p) {
u64 pend = p->offset + p->len;
if (p == k)
continue;
if (p->offset >= k->offset && pend <= kend)
p->remaps = k;
}
}
static void kcore_copy__layout(struct kcore_copy_info *kci)
{
struct phdr_data *p;
off_t rel = 0;
kcore_copy__find_remaps(kci);
kcore_copy__for_each_phdr(kci, p) {
if (!p->remaps) {
p->rel = rel;
rel += p->len;
}
kci->phnum += 1;
}
kcore_copy__for_each_phdr(kci, p) {
struct phdr_data *k = p->remaps;
if (k)
p->rel = p->offset - k->offset + k->rel;
}
}
static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
Elf *elf)
{
if (kcore_copy__parse_kallsyms(kci, dir))
return -1;
if (kcore_copy__parse_modules(kci, dir))
return -1;
if (kci->stext)
kci->stext = round_down(kci->stext, page_size);
else
kci->stext = round_down(kci->first_symbol, page_size);
if (kci->etext) {
kci->etext = round_up(kci->etext, page_size);
} else if (kci->last_symbol) {
kci->etext = round_up(kci->last_symbol, page_size);
kci->etext += page_size;
}
if (kci->first_module_symbol &&
(!kci->first_module || kci->first_module_symbol < kci->first_module))
kci->first_module = kci->first_module_symbol;
kci->first_module = round_down(kci->first_module, page_size);
if (kci->last_module_symbol) {
kci->last_module_symbol = round_up(kci->last_module_symbol,
page_size);
kci->last_module_symbol += page_size;
}
if (!kci->stext || !kci->etext)
return -1;
if (kci->first_module && !kci->last_module_symbol)
return -1;
if (kcore_copy__read_maps(kci, elf))
return -1;
kcore_copy__layout(kci);
return 0;
}
static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
const char *name)
{
char from_filename[PATH_MAX];
char to_filename[PATH_MAX];
scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
return copyfile_mode(from_filename, to_filename, 0400);
}
static int kcore_copy__unlink(const char *dir, const char *name)
{
char filename[PATH_MAX];
scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
return unlink(filename);
}
static int kcore_copy__compare_fds(int from, int to)
{
char *buf_from;
char *buf_to;
ssize_t ret;
size_t len;
int err = -1;
buf_from = malloc(page_size);
buf_to = malloc(page_size);
if (!buf_from || !buf_to)
goto out;
while (1) {
/* Use read because mmap won't work on proc files */
ret = read(from, buf_from, page_size);
if (ret < 0)
goto out;
if (!ret)
break;
len = ret;
if (readn(to, buf_to, len) != (int)len)
goto out;
if (memcmp(buf_from, buf_to, len))
goto out;
}
err = 0;
out:
free(buf_to);
free(buf_from);
return err;
}
static int kcore_copy__compare_files(const char *from_filename,
const char *to_filename)
{
int from, to, err = -1;
from = open(from_filename, O_RDONLY);
if (from < 0)
return -1;
to = open(to_filename, O_RDONLY);
if (to < 0)
goto out_close_from;
err = kcore_copy__compare_fds(from, to);
close(to);
out_close_from:
close(from);
return err;
}
static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
const char *name)
{
char from_filename[PATH_MAX];
char to_filename[PATH_MAX];
scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
return kcore_copy__compare_files(from_filename, to_filename);
}
/**
* kcore_copy - copy kallsyms, modules and kcore from one directory to another.
* @from_dir: from directory
* @to_dir: to directory
*
* This function copies kallsyms, modules and kcore files from one directory to
* another. kallsyms and modules are copied entirely. Only code segments are
* copied from kcore. It is assumed that two segments suffice: one for the
* kernel proper and one for all the modules. The code segments are determined
* from kallsyms and modules files. The kernel map starts at _stext or the
* lowest function symbol, and ends at _etext or the highest function symbol.
* The module map starts at the lowest module address and ends at the highest
* module symbol. Start addresses are rounded down to the nearest page. End
* addresses are rounded up to the nearest page. An extra page is added to the
* highest kernel symbol and highest module symbol to, hopefully, encompass that
* symbol too. Because it contains only code sections, the resulting kcore is
* unusual. One significant peculiarity is that the mapping (start -> pgoff)
* is not the same for the kernel map and the modules map. That happens because
* the data is copied adjacently whereas the original kcore has gaps. Finally,
* kallsyms file is compared with its copy to check that modules have not been
* loaded or unloaded while the copies were taking place.
*
* Return: %0 on success, %-1 on failure.
*/
int kcore_copy(const char *from_dir, const char *to_dir)
{
struct kcore kcore;
struct kcore extract;
int idx = 0, err = -1;
off_t offset, sz;
struct kcore_copy_info kci = { .stext = 0, };
char kcore_filename[PATH_MAX];
char extract_filename[PATH_MAX];
struct phdr_data *p;
INIT_LIST_HEAD(&kci.phdrs);
INIT_LIST_HEAD(&kci.syms);
if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
return -1;
if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
goto out_unlink_kallsyms;
scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
if (kcore__open(&kcore, kcore_filename))
goto out_unlink_modules;
if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
goto out_kcore_close;
if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
goto out_kcore_close;
if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
goto out_extract_close;
offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
offset = round_up(offset, page_size);
kcore_copy__for_each_phdr(&kci, p) {
off_t offs = p->rel + offset;
if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
goto out_extract_close;
}
sz = kcore__write(&extract);
if (sz < 0 || sz > offset)
goto out_extract_close;
kcore_copy__for_each_phdr(&kci, p) {
off_t offs = p->rel + offset;
if (p->remaps)
continue;
if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
goto out_extract_close;
}
if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
goto out_extract_close;
err = 0;
out_extract_close:
kcore__close(&extract);
if (err)
unlink(extract_filename);
out_kcore_close:
kcore__close(&kcore);
out_unlink_modules:
if (err)
kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
if (err)
kcore_copy__unlink(to_dir, "kallsyms");
kcore_copy__free_phdrs(&kci);
kcore_copy__free_syms(&kci);
return err;
}
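/*
 * Typical usage, e.g. from 'perf buildid-cache --kcore', is something like:
 *
 *	kcore_copy("/proc", "/path/to/buildid/cache/dir");
 *
 * which produces kallsyms, modules and a trimmed kcore in the target
 * directory, containing only the kernel and module code segments.
 */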
int kcore_extract__create(struct kcore_extract *kce)
{
struct kcore kcore;
struct kcore extract;
size_t count = 1;
int idx = 0, err = -1;
off_t offset = page_size, sz;
if (kcore__open(&kcore, kce->kcore_filename))
return -1;
strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
goto out_kcore_close;
if (kcore__copy_hdr(&kcore, &extract, count))
goto out_extract_close;
if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
goto out_extract_close;
sz = kcore__write(&extract);
if (sz < 0 || sz > offset)
goto out_extract_close;
if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
goto out_extract_close;
err = 0;
out_extract_close:
kcore__close(&extract);
if (err)
unlink(kce->extract_filename);
out_kcore_close:
kcore__close(&kcore);
return err;
}
void kcore_extract__delete(struct kcore_extract *kce)
{
unlink(kce->extract_filename);
}
#ifdef HAVE_GELF_GETNOTE_SUPPORT
static void sdt_adjust_loc(struct sdt_note *tmp, GElf_Addr base_off)
{
if (!base_off)
return;
if (tmp->bit32)
tmp->addr.a32[SDT_NOTE_IDX_LOC] =
tmp->addr.a32[SDT_NOTE_IDX_LOC] + base_off -
tmp->addr.a32[SDT_NOTE_IDX_BASE];
else
tmp->addr.a64[SDT_NOTE_IDX_LOC] =
tmp->addr.a64[SDT_NOTE_IDX_LOC] + base_off -
tmp->addr.a64[SDT_NOTE_IDX_BASE];
}
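/*
 * For example, if the note records loc = 0x401234 and base = 0x400000, but
 * .stapsdt.base actually sits at file offset 0x1000 (base_off), the probe
 * location is rewritten as 0x401234 + 0x1000 - 0x400000 = 0x2234.
 */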
static void sdt_adjust_refctr(struct sdt_note *tmp, GElf_Addr base_addr,
GElf_Addr base_off)
{
if (!base_off)
return;
if (tmp->bit32 && tmp->addr.a32[SDT_NOTE_IDX_REFCTR])
tmp->addr.a32[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
else if (tmp->addr.a64[SDT_NOTE_IDX_REFCTR])
tmp->addr.a64[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
}
/**
* populate_sdt_note : Parse raw data and identify SDT note
* @elf: elf of the opened file
* @data: raw data of a section with description offset applied
* @len: note description size
* @sdt_notes: List to add the SDT note
*
* Responsible for parsing the @data in section .note.stapsdt in @elf and
 * if it's an SDT note, appends it to the @sdt_notes list.
*/
static int populate_sdt_note(Elf **elf, const char *data, size_t len,
struct list_head *sdt_notes)
{
const char *provider, *name, *args;
struct sdt_note *tmp = NULL;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
int ret = -EINVAL;
union {
Elf64_Addr a64[NR_ADDR];
Elf32_Addr a32[NR_ADDR];
} buf;
Elf_Data dst = {
.d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
.d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
.d_off = 0, .d_align = 0
};
Elf_Data src = {
.d_buf = (void *) data, .d_type = ELF_T_ADDR,
.d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
.d_align = 0
};
tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
if (!tmp) {
ret = -ENOMEM;
goto out_err;
}
INIT_LIST_HEAD(&tmp->note_list);
if (len < dst.d_size + 3)
goto out_free_note;
/* Translation from file representation to memory representation */
if (gelf_xlatetom(*elf, &dst, &src,
elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
goto out_free_note;
}
/* Populate the fields of sdt_note */
provider = data + dst.d_size;
name = (const char *)memchr(provider, '\0', data + len - provider);
if (name++ == NULL)
goto out_free_note;
tmp->provider = strdup(provider);
if (!tmp->provider) {
ret = -ENOMEM;
goto out_free_note;
}
tmp->name = strdup(name);
if (!tmp->name) {
ret = -ENOMEM;
goto out_free_prov;
}
args = memchr(name, '\0', data + len - name);
/*
* There is no argument if:
* - We reached the end of the note;
* - There is not enough room to hold a potential string;
* - The argument string is empty or just contains ':'.
*/
if (args == NULL || data + len - args < 2 ||
args[1] == ':' || args[1] == '\0')
tmp->args = NULL;
else {
tmp->args = strdup(++args);
if (!tmp->args) {
ret = -ENOMEM;
goto out_free_name;
}
}
if (gelf_getclass(*elf) == ELFCLASS32) {
memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
tmp->bit32 = true;
} else {
memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
tmp->bit32 = false;
}
if (!gelf_getehdr(*elf, &ehdr)) {
pr_debug("%s : cannot get elf header.\n", __func__);
ret = -EBADF;
goto out_free_args;
}
	/* Adjust for the prelink effect:
	 * Find the .stapsdt.base section; it helps handle prelinking (if
	 * present). Compare the retrieved file offset of the base section
	 * with the base address in the description of the SDT note. If
	 * they differ, adjust the note location accordingly.
*/
if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL))
sdt_adjust_loc(tmp, shdr.sh_offset);
/* Adjust reference counter offset */
if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_PROBES_SCN, NULL))
sdt_adjust_refctr(tmp, shdr.sh_addr, shdr.sh_offset);
list_add_tail(&tmp->note_list, sdt_notes);
return 0;
out_free_args:
zfree(&tmp->args);
out_free_name:
zfree(&tmp->name);
out_free_prov:
zfree(&tmp->provider);
out_free_note:
free(tmp);
out_err:
return ret;
}
/**
* construct_sdt_notes_list : constructs a list of SDT notes
* @elf : elf to look into
* @sdt_notes : empty list_head
*
* Scans the sections in 'elf' for the section
 * .note.stapsdt. It then calls populate_sdt_note() to find
 * the SDT events and populate 'sdt_notes'.
*/
static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
{
GElf_Ehdr ehdr;
Elf_Scn *scn = NULL;
Elf_Data *data;
GElf_Shdr shdr;
size_t shstrndx, next;
GElf_Nhdr nhdr;
size_t name_off, desc_off, offset;
int ret = 0;
if (gelf_getehdr(elf, &ehdr) == NULL) {
ret = -EBADF;
goto out_ret;
}
if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
ret = -EBADF;
goto out_ret;
}
/* Look for the required section */
scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
if (!scn) {
ret = -ENOENT;
goto out_ret;
}
if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
ret = -ENOENT;
goto out_ret;
}
data = elf_getdata(scn, NULL);
/* Get the SDT notes */
for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
&desc_off)) > 0; offset = next) {
if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
!memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
sizeof(SDT_NOTE_NAME))) {
/* Check the type of the note */
if (nhdr.n_type != SDT_NOTE_TYPE)
goto out_ret;
ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
nhdr.n_descsz, sdt_notes);
if (ret < 0)
goto out_ret;
}
}
if (list_empty(sdt_notes))
ret = -ENOENT;
out_ret:
return ret;
}
/**
* get_sdt_note_list : Wrapper to construct a list of sdt notes
* @head : empty list_head
* @target : file to find SDT notes from
*
* This opens the file, initializes
* the ELF and then calls construct_sdt_notes_list.
*/
int get_sdt_note_list(struct list_head *head, const char *target)
{
Elf *elf;
int fd, ret;
fd = open(target, O_RDONLY);
if (fd < 0)
return -EBADF;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (!elf) {
ret = -EBADF;
goto out_close;
}
ret = construct_sdt_notes_list(elf, head);
elf_end(elf);
out_close:
close(fd);
return ret;
}
/**
* cleanup_sdt_note_list : free the sdt notes' list
* @sdt_notes: sdt notes' list
*
* Free up the SDT notes in @sdt_notes.
 * Returns the number of SDT notes freed.
*/
int cleanup_sdt_note_list(struct list_head *sdt_notes)
{
struct sdt_note *tmp, *pos;
int nr_free = 0;
list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
list_del_init(&pos->note_list);
zfree(&pos->args);
zfree(&pos->name);
zfree(&pos->provider);
free(pos);
nr_free++;
}
return nr_free;
}
/**
* sdt_notes__get_count: Counts the number of sdt events
* @start: list_head to sdt_notes list
*
* Returns the number of SDT notes in a list
*/
int sdt_notes__get_count(struct list_head *start)
{
struct sdt_note *sdt_ptr;
int count = 0;
list_for_each_entry(sdt_ptr, start, note_list)
count++;
return count;
}
#endif
void symbol__elf_init(void)
{
elf_version(EV_CURRENT);
}
| linux-master | tools/perf/util/symbol-elf.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include "mem-events.h"
#include "annotate.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
#include "trace-event.h"
#include <linux/kernel.h>
#include <linux/string.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
/*
* Some architectures have Adjacent Cacheline Prefetch feature, which
* behaves like the cacheline size is doubled. Enable this flag to
* check things in double cacheline granularity.
*/
bool chk_double_cl;
/*
 * Replaces all occurrences of the character given with the:
 *
 * -t, --field-separator
 *
 * option, which selects a special separator character and disables padding
 * with spaces. Every occurrence of this separator in symbol names (and other
 * output) is replaced with a '.' character, so that the separator appears in
 * the output only where it really separates fields.
*/
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
int n;
va_list ap;
va_start(ap, fmt);
n = vsnprintf(bf, size, fmt, ap);
if (symbol_conf.field_sep && n > 0) {
char *sep = bf;
while (1) {
sep = strchr(sep, *symbol_conf.field_sep);
if (sep == NULL)
break;
*sep = '.';
}
}
va_end(ap);
if (n >= (int)size)
return size - 1;
return n;
}
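/*
 * E.g. with '-t ,' and a symbol named "foo,bar", the field is emitted as
 * "foo.bar", so a ',' in the final output can only be a field separator.
 */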
static int64_t cmp_null(const void *l, const void *r)
{
if (!l && !r)
return 0;
else if (!l)
return -1;
else
return 1;
}
/* --sort pid */
static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
return thread__tid(right->thread) - thread__tid(left->thread);
}
static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
const char *comm = thread__comm_str(he->thread);
width = max(7U, width) - 8;
return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
width, width, comm ?: "");
}
static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
const struct thread *th = arg;
if (type != HIST_FILTER__THREAD)
return -1;
return th && RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(th);
}
struct sort_entry sort_thread = {
.se_header = " Pid:Command",
.se_cmp = sort__thread_cmp,
.se_snprintf = hist_entry__thread_snprintf,
.se_filter = hist_entry__thread_filter,
.se_width_idx = HISTC_THREAD,
};
/* --sort simd */
static int64_t
sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (left->simd_flags.arch != right->simd_flags.arch)
return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
}
static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
{
u64 arch = simd_flags->arch;
if (arch & SIMD_OP_FLAGS_ARCH_SVE)
return "SVE";
else
return "n/a";
}
static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width __maybe_unused)
{
const char *name;
if (!he->simd_flags.arch)
return repsep_snprintf(bf, size, "");
name = hist_entry__get_simd_name(&he->simd_flags);
if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
return repsep_snprintf(bf, size, "[e] %s", name);
else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
return repsep_snprintf(bf, size, "[p] %s", name);
return repsep_snprintf(bf, size, "[.] %s", name);
}
struct sort_entry sort_simd = {
.se_header = "Simd ",
.se_cmp = sort__simd_cmp,
.se_snprintf = hist_entry__simd_snprintf,
.se_width_idx = HISTC_SIMD,
};
/* --sort comm */
/*
* We can't use pointer comparison in functions below,
* because it gives different results based on pointer
* values, which could break some sorting assumptions.
*/
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
return strcmp(comm__str(right->comm), comm__str(left->comm));
}
static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
return strcmp(comm__str(right->comm), comm__str(left->comm));
}
static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
return strcmp(comm__str(right->comm), comm__str(left->comm));
}
static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}
struct sort_entry sort_comm = {
.se_header = "Command",
.se_cmp = sort__comm_cmp,
.se_collapse = sort__comm_collapse,
.se_sort = sort__comm_sort,
.se_snprintf = hist_entry__comm_snprintf,
.se_filter = hist_entry__thread_filter,
.se_width_idx = HISTC_COMM,
};
/* --sort dso */
static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
const char *dso_name_l, *dso_name_r;
if (!dso_l || !dso_r)
return cmp_null(dso_r, dso_l);
if (verbose > 0) {
dso_name_l = dso_l->long_name;
dso_name_r = dso_r->long_name;
} else {
dso_name_l = dso_l->short_name;
dso_name_r = dso_r->short_name;
}
return strcmp(dso_name_l, dso_name_r);
}
static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
return _sort__dso_cmp(right->ms.map, left->ms.map);
}
static int _hist_entry__dso_snprintf(struct map *map, char *bf,
size_t size, unsigned int width)
{
const struct dso *dso = map ? map__dso(map) : NULL;
const char *dso_name = "[unknown]";
if (dso)
dso_name = verbose > 0 ? dso->long_name : dso->short_name;
return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
}
static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}
static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
const struct dso *dso = arg;
if (type != HIST_FILTER__DSO)
return -1;
return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
}
struct sort_entry sort_dso = {
.se_header = "Shared Object",
.se_cmp = sort__dso_cmp,
.se_snprintf = hist_entry__dso_snprintf,
.se_filter = hist_entry__dso_filter,
.se_width_idx = HISTC_DSO,
};
/* --sort symbol */
static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
return (int64_t)(right_ip - left_ip);
}
int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
if (!sym_l || !sym_r)
return cmp_null(sym_l, sym_r);
if (sym_l == sym_r)
return 0;
if (sym_l->inlined || sym_r->inlined) {
int ret = strcmp(sym_l->name, sym_r->name);
if (ret)
return ret;
if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
return 0;
}
if (sym_l->start != sym_r->start)
return (int64_t)(sym_r->start - sym_l->start);
return (int64_t)(sym_r->end - sym_l->end);
}
static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
int64_t ret;
if (!left->ms.sym && !right->ms.sym)
return _sort__addr_cmp(left->ip, right->ip);
/*
* comparing symbol address alone is not enough since it's a
* relative address within a dso.
*/
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
ret = sort__dso_cmp(left, right);
if (ret != 0)
return ret;
}
return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}
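/*
 * For illustration: two different DSOs can both contain a symbol at the same
 * dso-relative start address (say, one at 0x1040 in /usr/bin/foo and another
 * at 0x1040 in libbar.so). Without the sort__dso_cmp() call above such
 * entries could compare as equal and be collapsed into one histogram entry
 * even though they refer to different code.
 */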
static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
if (!left->ms.sym || !right->ms.sym)
return cmp_null(left->ms.sym, right->ms.sym);
return strcmp(right->ms.sym->name, left->ms.sym->name);
}
static int _hist_entry__sym_snprintf(struct map_symbol *ms,
u64 ip, char level, char *bf, size_t size,
unsigned int width)
{
struct symbol *sym = ms->sym;
struct map *map = ms->map;
size_t ret = 0;
if (verbose > 0) {
struct dso *dso = map ? map__dso(map) : NULL;
char o = dso ? dso__symtab_origin(dso) : '!';
u64 rip = ip;
if (dso && dso->kernel && dso->adjust_symbols)
rip = map__unmap_ip(map, ip);
ret += repsep_snprintf(bf, size, "%-#*llx %c ",
BITS_PER_LONG / 4 + 2, rip, o);
}
ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
if (sym && map) {
if (sym->type == STT_OBJECT) {
ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
ip - map__unmap_ip(map, sym->start));
} else {
ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
width - ret,
sym->name);
if (sym->inlined)
ret += repsep_snprintf(bf + ret, size - ret,
" (inlined)");
}
} else {
size_t len = BITS_PER_LONG / 4;
ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
len, ip);
}
return ret;
}
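/*
 * For illustration, typical renderings of this column are:
 *
 *	"[k] vfs_read"              - resolved kernel symbol
 *	"[.] run_builtin (inlined)" - inlined user-space symbol
 *	"[.] 0x00007f1a2b3c4d5e"    - no symbol resolved, raw address printed
 *
 * With verbose > 0 each line is additionally prefixed with the (possibly
 * unmapped) address and the symtab origin character, e.g.
 * "0xffffffff812345a0 k ".
 */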
int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
return _hist_entry__sym_snprintf(&he->ms, he->ip,
he->level, bf, size, width);
}
static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
const char *sym = arg;
if (type != HIST_FILTER__SYMBOL)
return -1;
return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}
struct sort_entry sort_sym = {
.se_header = "Symbol",
.se_cmp = sort__sym_cmp,
.se_sort = sort__sym_sort,
.se_snprintf = hist_entry__sym_snprintf,
.se_filter = hist_entry__sym_filter,
.se_width_idx = HISTC_SYMBOL,
};
/* --sort srcline */
char *hist_entry__srcline(struct hist_entry *he)
{
return map__srcline(he->ms.map, he->ip, he->ms.sym);
}
static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
int64_t ret;
ret = _sort__addr_cmp(left->ip, right->ip);
if (ret)
return ret;
return sort__dso_cmp(left, right);
}
static int64_t
sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
{
if (!left->srcline)
left->srcline = hist_entry__srcline(left);
if (!right->srcline)
right->srcline = hist_entry__srcline(right);
return strcmp(right->srcline, left->srcline);
}
static int64_t
sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
{
return sort__srcline_collapse(left, right);
}
static void
sort__srcline_init(struct hist_entry *he)
{
if (!he->srcline)
he->srcline = hist_entry__srcline(he);
}
static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}
struct sort_entry sort_srcline = {
.se_header = "Source:Line",
.se_cmp = sort__srcline_cmp,
.se_collapse = sort__srcline_collapse,
.se_sort = sort__srcline_sort,
.se_init = sort__srcline_init,
.se_snprintf = hist_entry__srcline_snprintf,
.se_width_idx = HISTC_SRCLINE,
};
/* --sort srcline_from */
static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
}
static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
return left->branch_info->from.addr - right->branch_info->from.addr;
}
static int64_t
sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info->srcline_from)
left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
if (!right->branch_info->srcline_from)
right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}
static int64_t
sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
{
return sort__srcline_from_collapse(left, right);
}
static void sort__srcline_from_init(struct hist_entry *he)
{
if (!he->branch_info->srcline_from)
he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
}
static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}
struct sort_entry sort_srcline_from = {
.se_header = "From Source:Line",
.se_cmp = sort__srcline_from_cmp,
.se_collapse = sort__srcline_from_collapse,
.se_sort = sort__srcline_from_sort,
.se_init = sort__srcline_from_init,
.se_snprintf = hist_entry__srcline_from_snprintf,
.se_width_idx = HISTC_SRCLINE_FROM,
};
/* --sort srcline_to */
static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
return left->branch_info->to.addr - right->branch_info->to.addr;
}
static int64_t
sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info->srcline_to)
left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
if (!right->branch_info->srcline_to)
right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}
static int64_t
sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
{
return sort__srcline_to_collapse(left, right);
}
static void sort__srcline_to_init(struct hist_entry *he)
{
if (!he->branch_info->srcline_to)
he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
}
static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}
struct sort_entry sort_srcline_to = {
.se_header = "To Source:Line",
.se_cmp = sort__srcline_to_cmp,
.se_collapse = sort__srcline_to_collapse,
.se_sort = sort__srcline_to_sort,
.se_init = sort__srcline_to_init,
.se_snprintf = hist_entry__srcline_to_snprintf,
.se_width_idx = HISTC_SRCLINE_TO,
};
static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
struct symbol *sym = he->ms.sym;
struct annotation *notes;
double ipc = 0.0, coverage = 0.0;
char tmp[64];
if (!sym)
return repsep_snprintf(bf, size, "%-*s", width, "-");
notes = symbol__annotation(sym);
if (notes->hit_cycles)
ipc = notes->hit_insn / ((double)notes->hit_cycles);
if (notes->total_insn) {
coverage = notes->cover_insn * 100.0 /
((double)notes->total_insn);
}
snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
return repsep_snprintf(bf, size, "%-*s", width, tmp);
}
struct sort_entry sort_sym_ipc = {
.se_header = "IPC [IPC Coverage]",
.se_cmp = sort__sym_cmp,
.se_snprintf = hist_entry__sym_ipc_snprintf,
.se_width_idx = HISTC_SYMBOL_IPC,
};
static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
__maybe_unused,
char *bf, size_t size,
unsigned int width)
{
char tmp[64];
snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
return repsep_snprintf(bf, size, "%-*s", width, tmp);
}
struct sort_entry sort_sym_ipc_null = {
.se_header = "IPC [IPC Coverage]",
.se_cmp = sort__sym_cmp,
.se_snprintf = hist_entry__sym_ipc_null_snprintf,
.se_width_idx = HISTC_SYMBOL_IPC,
};
/* --sort srcfile */
static char no_srcfile[1];
static char *hist_entry__get_srcfile(struct hist_entry *e)
{
char *sf, *p;
struct map *map = e->ms.map;
if (!map)
return no_srcfile;
sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
e->ms.sym, false, true, true, e->ip);
if (sf == SRCLINE_UNKNOWN)
return no_srcfile;
p = strchr(sf, ':');
if (p && *sf) {
*p = 0;
return sf;
}
free(sf);
return no_srcfile;
}
static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
return sort__srcline_cmp(left, right);
}
static int64_t
sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
{
if (!left->srcfile)
left->srcfile = hist_entry__get_srcfile(left);
if (!right->srcfile)
right->srcfile = hist_entry__get_srcfile(right);
return strcmp(right->srcfile, left->srcfile);
}
static int64_t
sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
{
return sort__srcfile_collapse(left, right);
}
static void sort__srcfile_init(struct hist_entry *he)
{
if (!he->srcfile)
he->srcfile = hist_entry__get_srcfile(he);
}
static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}
struct sort_entry sort_srcfile = {
.se_header = "Source File",
.se_cmp = sort__srcfile_cmp,
.se_collapse = sort__srcfile_collapse,
.se_sort = sort__srcfile_sort,
.se_init = sort__srcfile_init,
.se_snprintf = hist_entry__srcfile_snprintf,
.se_width_idx = HISTC_SRCFILE,
};
/* --sort parent */
static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct symbol *sym_l = left->parent;
struct symbol *sym_r = right->parent;
if (!sym_l || !sym_r)
return cmp_null(sym_l, sym_r);
return strcmp(sym_r->name, sym_l->name);
}
static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*.*s", width, width,
he->parent ? he->parent->name : "[other]");
}
struct sort_entry sort_parent = {
.se_header = "Parent symbol",
.se_cmp = sort__parent_cmp,
.se_snprintf = hist_entry__parent_snprintf,
.se_width_idx = HISTC_PARENT,
};
/* --sort cpu */
static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
return right->cpu - left->cpu;
}
static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}
struct sort_entry sort_cpu = {
.se_header = "CPU",
.se_cmp = sort__cpu_cmp,
.se_snprintf = hist_entry__cpu_snprintf,
.se_width_idx = HISTC_CPU,
};
/* --sort cgroup_id */
static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
return (int64_t)(right_dev - left_dev);
}
static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
return (int64_t)(right_ino - left_ino);
}
static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
int64_t ret;
ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
if (ret != 0)
return ret;
return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
left->cgroup_id.ino);
}
static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
char *bf, size_t size,
unsigned int width __maybe_unused)
{
return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
he->cgroup_id.ino);
}
struct sort_entry sort_cgroup_id = {
.se_header = "cgroup id (dev/inode)",
.se_cmp = sort__cgroup_id_cmp,
.se_snprintf = hist_entry__cgroup_id_snprintf,
.se_width_idx = HISTC_CGROUP_ID,
};
/* --sort cgroup */
static int64_t
sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
{
return right->cgroup - left->cgroup;
}
static int hist_entry__cgroup_snprintf(struct hist_entry *he,
char *bf, size_t size,
unsigned int width __maybe_unused)
{
const char *cgrp_name = "N/A";
if (he->cgroup) {
struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
he->cgroup);
if (cgrp != NULL)
cgrp_name = cgrp->name;
else
cgrp_name = "unknown";
}
return repsep_snprintf(bf, size, "%s", cgrp_name);
}
struct sort_entry sort_cgroup = {
.se_header = "Cgroup",
.se_cmp = sort__cgroup_cmp,
.se_snprintf = hist_entry__cgroup_snprintf,
.se_width_idx = HISTC_CGROUP,
};
/* --sort socket */
static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
return right->socket - left->socket;
}
static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}
static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
int sk = *(const int *)arg;
if (type != HIST_FILTER__SOCKET)
return -1;
return sk >= 0 && he->socket != sk;
}
struct sort_entry sort_socket = {
.se_header = "Socket",
.se_cmp = sort__socket_cmp,
.se_snprintf = hist_entry__socket_snprintf,
.se_filter = hist_entry__socket_filter,
.se_width_idx = HISTC_SOCKET,
};
/* --sort time */
static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
return right->time - left->time;
}
static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char he_time[32];
if (symbol_conf.nanosecs)
timestamp__scnprintf_nsec(he->time, he_time,
sizeof(he_time));
else
timestamp__scnprintf_usec(he->time, he_time,
sizeof(he_time));
return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}
struct sort_entry sort_time = {
.se_header = "Time",
.se_cmp = sort__time_cmp,
.se_snprintf = hist_entry__time_snprintf,
.se_width_idx = HISTC_TIME,
};
/* --sort trace */
#ifdef HAVE_LIBTRACEEVENT
static char *get_trace_output(struct hist_entry *he)
{
struct trace_seq seq;
struct evsel *evsel;
struct tep_record rec = {
.data = he->raw_data,
.size = he->raw_size,
};
evsel = hists_to_evsel(he->hists);
trace_seq_init(&seq);
if (symbol_conf.raw_trace) {
tep_print_fields(&seq, he->raw_data, he->raw_size,
evsel->tp_format);
} else {
tep_print_event(evsel->tp_format->tep,
&seq, &rec, "%s", TEP_PRINT_INFO);
}
/*
	 * Trim the buffer: it starts at 4KB and nothing more will be
	 * added to it.
*/
return realloc(seq.buffer, seq.len + 1);
}
static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct evsel *evsel;
evsel = hists_to_evsel(left->hists);
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
return 0;
if (left->trace_output == NULL)
left->trace_output = get_trace_output(left);
if (right->trace_output == NULL)
right->trace_output = get_trace_output(right);
return strcmp(right->trace_output, left->trace_output);
}
static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
struct evsel *evsel;
evsel = hists_to_evsel(he->hists);
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
return scnprintf(bf, size, "%-.*s", width, "N/A");
if (he->trace_output == NULL)
he->trace_output = get_trace_output(he);
return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}
struct sort_entry sort_trace = {
.se_header = "Trace output",
.se_cmp = sort__trace_cmp,
.se_snprintf = hist_entry__trace_snprintf,
.se_width_idx = HISTC_TRACE,
};
#endif /* HAVE_LIBTRACEEVENT */
/* sort keys for branch stacks */
static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return _sort__dso_cmp(left->branch_info->from.ms.map,
right->branch_info->from.ms.map);
}
static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info)
return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
bf, size, width);
else
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
const void *arg)
{
const struct dso *dso = arg;
if (type != HIST_FILTER__DSO)
return -1;
return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
map__dso(he->branch_info->from.ms.map) != dso);
}
static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return _sort__dso_cmp(left->branch_info->to.ms.map,
right->branch_info->to.ms.map);
}
static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info)
return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
bf, size, width);
else
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
const void *arg)
{
const struct dso *dso = arg;
if (type != HIST_FILTER__DSO)
return -1;
return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
map__dso(he->branch_info->to.ms.map) != dso);
}
static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct addr_map_symbol *from_l, *from_r;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
from_l = &left->branch_info->from;
from_r = &right->branch_info->from;
if (!from_l->ms.sym && !from_r->ms.sym)
return _sort__addr_cmp(from_l->addr, from_r->addr);
return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
}
static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct addr_map_symbol *to_l, *to_r;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
to_l = &left->branch_info->to;
to_r = &right->branch_info->to;
if (!to_l->ms.sym && !to_r->ms.sym)
return _sort__addr_cmp(to_l->addr, to_r->addr);
return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
}
static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info) {
struct addr_map_symbol *from = &he->branch_info->from;
return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
from->al_level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info) {
struct addr_map_symbol *to = &he->branch_info->to;
return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
to->al_level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
const void *arg)
{
const char *sym = arg;
if (type != HIST_FILTER__SYMBOL)
return -1;
return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
strstr(he->branch_info->from.ms.sym->name, sym));
}
static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
const void *arg)
{
const char *sym = arg;
if (type != HIST_FILTER__SYMBOL)
return -1;
return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
strstr(he->branch_info->to.ms.sym->name, sym));
}
struct sort_entry sort_dso_from = {
.se_header = "Source Shared Object",
.se_cmp = sort__dso_from_cmp,
.se_snprintf = hist_entry__dso_from_snprintf,
.se_filter = hist_entry__dso_from_filter,
.se_width_idx = HISTC_DSO_FROM,
};
struct sort_entry sort_dso_to = {
.se_header = "Target Shared Object",
.se_cmp = sort__dso_to_cmp,
.se_snprintf = hist_entry__dso_to_snprintf,
.se_filter = hist_entry__dso_to_filter,
.se_width_idx = HISTC_DSO_TO,
};
struct sort_entry sort_sym_from = {
.se_header = "Source Symbol",
.se_cmp = sort__sym_from_cmp,
.se_snprintf = hist_entry__sym_from_snprintf,
.se_filter = hist_entry__sym_from_filter,
.se_width_idx = HISTC_SYMBOL_FROM,
};
struct sort_entry sort_sym_to = {
.se_header = "Target Symbol",
.se_cmp = sort__sym_to_cmp,
.se_snprintf = hist_entry__sym_to_snprintf,
.se_filter = hist_entry__sym_to_filter,
.se_width_idx = HISTC_SYMBOL_TO,
};
static int _hist_entry__addr_snprintf(struct map_symbol *ms,
u64 ip, char level, char *bf, size_t size,
unsigned int width)
{
struct symbol *sym = ms->sym;
struct map *map = ms->map;
size_t ret = 0, offs;
ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
if (sym && map) {
if (sym->type == STT_OBJECT) {
ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
ip - map__unmap_ip(map, sym->start));
} else {
ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
width - ret,
sym->name);
offs = ip - sym->start;
if (offs)
ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
}
} else {
size_t len = BITS_PER_LONG / 4;
ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
len, ip);
}
return ret;
}
static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info) {
struct addr_map_symbol *from = &he->branch_info->from;
return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
he->level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info) {
struct addr_map_symbol *to = &he->branch_info->to;
return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
he->level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
static int64_t
sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct addr_map_symbol *from_l;
struct addr_map_symbol *from_r;
int64_t ret;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
from_l = &left->branch_info->from;
from_r = &right->branch_info->from;
/*
* comparing symbol address alone is not enough since it's a
* relative address within a dso.
*/
ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
if (ret != 0)
return ret;
return _sort__addr_cmp(from_l->addr, from_r->addr);
}
static int64_t
sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct addr_map_symbol *to_l;
struct addr_map_symbol *to_r;
int64_t ret;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
to_l = &left->branch_info->to;
to_r = &right->branch_info->to;
/*
* comparing symbol address alone is not enough since it's a
* relative address within a dso.
*/
ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
if (ret != 0)
return ret;
return _sort__addr_cmp(to_l->addr, to_r->addr);
}
struct sort_entry sort_addr_from = {
.se_header = "Source Address",
.se_cmp = sort__addr_from_cmp,
.se_snprintf = hist_entry__addr_from_snprintf,
.se_filter = hist_entry__sym_from_filter, /* shared with sym_from */
.se_width_idx = HISTC_ADDR_FROM,
};
struct sort_entry sort_addr_to = {
.se_header = "Target Address",
.se_cmp = sort__addr_to_cmp,
.se_snprintf = hist_entry__addr_to_snprintf,
.se_filter = hist_entry__sym_to_filter, /* shared with sym_to */
.se_width_idx = HISTC_ADDR_TO,
};
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
unsigned char mp, p;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
return mp || p;
}
static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
				size_t size, unsigned int width)
{
	const char *out = "N/A";
if (he->branch_info) {
if (he->branch_info->flags.predicted)
out = "N";
else if (he->branch_info->flags.mispred)
out = "Y";
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}
static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return left->branch_info->flags.cycles -
right->branch_info->flags.cycles;
}
static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (!he->branch_info)
return scnprintf(bf, size, "%-.*s", width, "N/A");
if (he->branch_info->flags.cycles == 0)
return repsep_snprintf(bf, size, "%-*s", width, "-");
return repsep_snprintf(bf, size, "%-*hd", width,
he->branch_info->flags.cycles);
}
struct sort_entry sort_cycles = {
.se_header = "Basic Block Cycles",
.se_cmp = sort__cycles_cmp,
.se_snprintf = hist_entry__cycles_snprintf,
.se_width_idx = HISTC_CYCLES,
};
/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
uint64_t l = 0, r = 0;
if (left->mem_info)
l = left->mem_info->daddr.addr;
if (right->mem_info)
r = right->mem_info->daddr.addr;
return (int64_t)(r - l);
}
static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
uint64_t addr = 0;
struct map_symbol *ms = NULL;
if (he->mem_info) {
addr = he->mem_info->daddr.addr;
ms = &he->mem_info->daddr.ms;
}
return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}
int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
uint64_t l = 0, r = 0;
if (left->mem_info)
l = left->mem_info->iaddr.addr;
if (right->mem_info)
r = right->mem_info->iaddr.addr;
return (int64_t)(r - l);
}
static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
uint64_t addr = 0;
struct map_symbol *ms = NULL;
if (he->mem_info) {
addr = he->mem_info->iaddr.addr;
ms = &he->mem_info->iaddr.ms;
}
return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}
static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct map *map_l = NULL;
struct map *map_r = NULL;
if (left->mem_info)
map_l = left->mem_info->daddr.ms.map;
if (right->mem_info)
map_r = right->mem_info->daddr.ms.map;
return _sort__dso_cmp(map_l, map_r);
}
static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
struct map *map = NULL;
if (he->mem_info)
map = he->mem_info->daddr.ms.map;
return _hist_entry__dso_snprintf(map, bf, size, width);
}
static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
union perf_mem_data_src data_src_l;
union perf_mem_data_src data_src_r;
if (left->mem_info)
data_src_l = left->mem_info->data_src;
else
data_src_l.mem_lock = PERF_MEM_LOCK_NA;
if (right->mem_info)
data_src_r = right->mem_info->data_src;
else
data_src_r.mem_lock = PERF_MEM_LOCK_NA;
return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}
static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[10];
perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
return repsep_snprintf(bf, size, "%.*s", width, out);
}
static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
union perf_mem_data_src data_src_l;
union perf_mem_data_src data_src_r;
if (left->mem_info)
data_src_l = left->mem_info->data_src;
else
data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
if (right->mem_info)
data_src_r = right->mem_info->data_src;
else
data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}
static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[64];
perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
return repsep_snprintf(bf, size, "%-*s", width, out);
}
static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
union perf_mem_data_src data_src_l;
union perf_mem_data_src data_src_r;
if (left->mem_info)
data_src_l = left->mem_info->data_src;
else
data_src_l.mem_lvl = PERF_MEM_LVL_NA;
if (right->mem_info)
data_src_r = right->mem_info->data_src;
else
data_src_r.mem_lvl = PERF_MEM_LVL_NA;
return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}
static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[64];
perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
return repsep_snprintf(bf, size, "%-*s", width, out);
}
static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
union perf_mem_data_src data_src_l;
union perf_mem_data_src data_src_r;
if (left->mem_info)
data_src_l = left->mem_info->data_src;
else
data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
if (right->mem_info)
data_src_r = right->mem_info->data_src;
else
data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}
static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[64];
perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
return repsep_snprintf(bf, size, "%-*s", width, out);
}
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
u64 l, r;
struct map *l_map, *r_map;
struct dso *l_dso, *r_dso;
int rc;
if (!left->mem_info) return -1;
if (!right->mem_info) return 1;
/* group event types together */
if (left->cpumode > right->cpumode) return -1;
if (left->cpumode < right->cpumode) return 1;
l_map = left->mem_info->daddr.ms.map;
r_map = right->mem_info->daddr.ms.map;
/* if both are NULL, jump to sort on al_addr instead */
if (!l_map && !r_map)
goto addr;
if (!l_map) return -1;
if (!r_map) return 1;
l_dso = map__dso(l_map);
r_dso = map__dso(r_map);
rc = dso__cmp_id(l_dso, r_dso);
if (rc)
return rc;
/*
* Addresses with no major/minor numbers are assumed to be
* anonymous in userspace. Sort those on pid then address.
*
* The kernel and non-zero major/minor mapped areas are
* assumed to be unity mapped. Sort those on address.
*/
if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
(!(map__flags(l_map) & MAP_SHARED)) && !l_dso->id.maj && !l_dso->id.min &&
!l_dso->id.ino && !l_dso->id.ino_generation) {
/* userspace anonymous */
if (thread__pid(left->thread) > thread__pid(right->thread))
return -1;
if (thread__pid(left->thread) < thread__pid(right->thread))
return 1;
}
addr:
/* al_addr does all the right addr - start + offset calculations */
l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl);
r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl);
if (l > r) return -1;
if (l < r) return 1;
return 0;
}
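/*
 * For illustration: with 64-byte cache lines, cl_address() masks off the low
 * bits of the data address, so 0x7f32a1040008 and 0x7f32a104003f fall into
 * the same "Data Cacheline" bucket (0x7f32a1040000) while 0x7f32a1040040
 * starts a new one. When chk_double_cl is set the mask covers 128 bytes
 * instead, grouping two adjacent lines together for machines with adjacent
 * cacheline prefetch.
 */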
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
uint64_t addr = 0;
struct map_symbol *ms = NULL;
char level = he->level;
if (he->mem_info) {
struct map *map = he->mem_info->daddr.ms.map;
struct dso *dso = map ? map__dso(map) : NULL;
addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl);
ms = &he->mem_info->daddr.ms;
/* print [s] for shared data mmaps */
if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
map && !(map__prot(map) & PROT_EXEC) &&
(map__flags(map) & MAP_SHARED) &&
(dso->id.maj || dso->id.min || dso->id.ino || dso->id.ino_generation))
level = 's';
else if (!map)
level = 'X';
}
return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}
struct sort_entry sort_mispredict = {
.se_header = "Branch Mispredicted",
.se_cmp = sort__mispredict_cmp,
.se_snprintf = hist_entry__mispredict_snprintf,
.se_width_idx = HISTC_MISPREDICT,
};
static int64_t
sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
return left->weight - right->weight;
}
static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
}
struct sort_entry sort_local_weight = {
.se_header = "Local Weight",
.se_cmp = sort__weight_cmp,
.se_snprintf = hist_entry__local_weight_snprintf,
.se_width_idx = HISTC_LOCAL_WEIGHT,
};
static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*llu", width,
he->weight * he->stat.nr_events);
}
struct sort_entry sort_global_weight = {
.se_header = "Weight",
.se_cmp = sort__weight_cmp,
.se_snprintf = hist_entry__global_weight_snprintf,
.se_width_idx = HISTC_GLOBAL_WEIGHT,
};
static int64_t
sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
{
return left->ins_lat - right->ins_lat;
}
static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
}
struct sort_entry sort_local_ins_lat = {
.se_header = "Local INSTR Latency",
.se_cmp = sort__ins_lat_cmp,
.se_snprintf = hist_entry__local_ins_lat_snprintf,
.se_width_idx = HISTC_LOCAL_INS_LAT,
};
static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*u", width,
he->ins_lat * he->stat.nr_events);
}
struct sort_entry sort_global_ins_lat = {
.se_header = "INSTR Latency",
.se_cmp = sort__ins_lat_cmp,
.se_snprintf = hist_entry__global_ins_lat_snprintf,
.se_width_idx = HISTC_GLOBAL_INS_LAT,
};
static int64_t
sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
{
return left->p_stage_cyc - right->p_stage_cyc;
}
static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*u", width,
he->p_stage_cyc * he->stat.nr_events);
}
static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
}
struct sort_entry sort_local_p_stage_cyc = {
.se_header = "Local Pipeline Stage Cycle",
.se_cmp = sort__p_stage_cyc_cmp,
.se_snprintf = hist_entry__p_stage_cyc_snprintf,
.se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
};
struct sort_entry sort_global_p_stage_cyc = {
.se_header = "Pipeline Stage Cycle",
.se_cmp = sort__p_stage_cyc_cmp,
.se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
.se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
};
struct sort_entry sort_mem_daddr_sym = {
.se_header = "Data Symbol",
.se_cmp = sort__daddr_cmp,
.se_snprintf = hist_entry__daddr_snprintf,
.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};
struct sort_entry sort_mem_iaddr_sym = {
.se_header = "Code Symbol",
.se_cmp = sort__iaddr_cmp,
.se_snprintf = hist_entry__iaddr_snprintf,
.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};
struct sort_entry sort_mem_daddr_dso = {
.se_header = "Data Object",
.se_cmp = sort__dso_daddr_cmp,
.se_snprintf = hist_entry__dso_daddr_snprintf,
.se_width_idx = HISTC_MEM_DADDR_DSO,
};
struct sort_entry sort_mem_locked = {
.se_header = "Locked",
.se_cmp = sort__locked_cmp,
.se_snprintf = hist_entry__locked_snprintf,
.se_width_idx = HISTC_MEM_LOCKED,
};
struct sort_entry sort_mem_tlb = {
.se_header = "TLB access",
.se_cmp = sort__tlb_cmp,
.se_snprintf = hist_entry__tlb_snprintf,
.se_width_idx = HISTC_MEM_TLB,
};
struct sort_entry sort_mem_lvl = {
.se_header = "Memory access",
.se_cmp = sort__lvl_cmp,
.se_snprintf = hist_entry__lvl_snprintf,
.se_width_idx = HISTC_MEM_LVL,
};
struct sort_entry sort_mem_snoop = {
.se_header = "Snoop",
.se_cmp = sort__snoop_cmp,
.se_snprintf = hist_entry__snoop_snprintf,
.se_width_idx = HISTC_MEM_SNOOP,
};
struct sort_entry sort_mem_dcacheline = {
.se_header = "Data Cacheline",
.se_cmp = sort__dcacheline_cmp,
.se_snprintf = hist_entry__dcacheline_snprintf,
.se_width_idx = HISTC_MEM_DCACHELINE,
};
static int64_t
sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
{
union perf_mem_data_src data_src_l;
union perf_mem_data_src data_src_r;
if (left->mem_info)
data_src_l = left->mem_info->data_src;
else
data_src_l.mem_blk = PERF_MEM_BLK_NA;
if (right->mem_info)
data_src_r = right->mem_info->data_src;
else
data_src_r.mem_blk = PERF_MEM_BLK_NA;
return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
}
static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[16];
perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
return repsep_snprintf(bf, size, "%.*s", width, out);
}
struct sort_entry sort_mem_blocked = {
.se_header = "Blocked",
.se_cmp = sort__blocked_cmp,
.se_snprintf = hist_entry__blocked_snprintf,
.se_width_idx = HISTC_MEM_BLOCKED,
};
static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
uint64_t l = 0, r = 0;
if (left->mem_info)
l = left->mem_info->daddr.phys_addr;
if (right->mem_info)
r = right->mem_info->daddr.phys_addr;
return (int64_t)(r - l);
}
static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
uint64_t addr = 0;
size_t ret = 0;
size_t len = BITS_PER_LONG / 4;
addr = he->mem_info->daddr.phys_addr;
ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
if (ret > width)
bf[width] = '\0';
return width;
}
struct sort_entry sort_mem_phys_daddr = {
.se_header = "Data Physical Address",
.se_cmp = sort__phys_daddr_cmp,
.se_snprintf = hist_entry__phys_daddr_snprintf,
.se_width_idx = HISTC_MEM_PHYS_DADDR,
};
static int64_t
sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
uint64_t l = 0, r = 0;
if (left->mem_info)
l = left->mem_info->daddr.data_page_size;
if (right->mem_info)
r = right->mem_info->daddr.data_page_size;
return (int64_t)(r - l);
}
static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char str[PAGE_SIZE_NAME_LEN];
return repsep_snprintf(bf, size, "%-*s", width,
get_page_size_name(he->mem_info->daddr.data_page_size, str));
}
struct sort_entry sort_mem_data_page_size = {
.se_header = "Data Page Size",
.se_cmp = sort__data_page_size_cmp,
.se_snprintf = hist_entry__data_page_size_snprintf,
.se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
};
static int64_t
sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
uint64_t l = left->code_page_size;
uint64_t r = right->code_page_size;
return (int64_t)(r - l);
}
static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char str[PAGE_SIZE_NAME_LEN];
return repsep_snprintf(bf, size, "%-*s", width,
get_page_size_name(he->code_page_size, str));
}
struct sort_entry sort_code_page_size = {
.se_header = "Code Page Size",
.se_cmp = sort__code_page_size_cmp,
.se_snprintf = hist_entry__code_page_size_snprintf,
.se_width_idx = HISTC_CODE_PAGE_SIZE,
};
static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return left->branch_info->flags.abort !=
right->branch_info->flags.abort;
}
static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
	const char *out = "N/A";
if (he->branch_info) {
if (he->branch_info->flags.abort)
out = "A";
else
out = ".";
}
return repsep_snprintf(bf, size, "%-*s", width, out);
}
struct sort_entry sort_abort = {
.se_header = "Transaction abort",
.se_cmp = sort__abort_cmp,
.se_snprintf = hist_entry__abort_snprintf,
.se_width_idx = HISTC_ABORT,
};
static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return left->branch_info->flags.in_tx !=
right->branch_info->flags.in_tx;
}
static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
	const char *out = "N/A";
if (he->branch_info) {
if (he->branch_info->flags.in_tx)
out = "T";
else
out = ".";
}
return repsep_snprintf(bf, size, "%-*s", width, out);
}
struct sort_entry sort_in_tx = {
.se_header = "Branch in transaction",
.se_cmp = sort__in_tx_cmp,
.se_snprintf = hist_entry__in_tx_snprintf,
.se_width_idx = HISTC_IN_TX,
};
static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
return left->transaction - right->transaction;
}
static inline char *add_str(char *p, const char *str)
{
strcpy(p, str);
return p + strlen(str);
}
static struct txbit {
unsigned flag;
const char *name;
int skip_for_len;
} txbits[] = {
{ PERF_TXN_ELISION, "EL ", 0 },
{ PERF_TXN_TRANSACTION, "TX ", 1 },
{ PERF_TXN_SYNC, "SYNC ", 1 },
{ PERF_TXN_ASYNC, "ASYNC ", 0 },
{ PERF_TXN_RETRY, "RETRY ", 0 },
{ PERF_TXN_CONFLICT, "CON ", 0 },
{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
{ 0, NULL, 0 }
};
int hist_entry__transaction_len(void)
{
int i;
int len = 0;
for (i = 0; txbits[i].name; i++) {
if (!txbits[i].skip_for_len)
len += strlen(txbits[i].name);
}
len += 4; /* :XX<space> */
return len;
}
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
u64 t = he->transaction;
char buf[128];
char *p = buf;
int i;
buf[0] = 0;
for (i = 0; txbits[i].name; i++)
if (txbits[i].flag & t)
p = add_str(p, txbits[i].name);
if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
p = add_str(p, "NEITHER ");
if (t & PERF_TXN_ABORT_MASK) {
sprintf(p, ":%" PRIx64,
(t & PERF_TXN_ABORT_MASK) >>
PERF_TXN_ABORT_SHIFT);
p += strlen(p);
}
return repsep_snprintf(bf, size, "%-*s", width, buf);
}
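/*
 * For illustration: a sample whose transaction word has PERF_TXN_TRANSACTION,
 * PERF_TXN_SYNC and PERF_TXN_CONFLICT set, with an abort code of 3 in the
 * PERF_TXN_ABORT_MASK bits, is rendered above as "TX SYNC CON :3".
 * hist_entry__transaction_len() sizes the column for the flag names that are
 * not marked skip_for_len plus the ":XX " abort suffix.
 */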
struct sort_entry sort_transaction = {
.se_header = "Transaction ",
.se_cmp = sort__transaction_cmp,
.se_snprintf = hist_entry__transaction_snprintf,
.se_width_idx = HISTC_TRANSACTION,
};
/* --sort symbol_size */
static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
return size_l < size_r ? -1 :
size_l == size_r ? 0 : 1;
}
static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}
static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
size_t bf_size, unsigned int width)
{
if (sym)
return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}
static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}
struct sort_entry sort_sym_size = {
.se_header = "Symbol size",
.se_cmp = sort__sym_size_cmp,
.se_snprintf = hist_entry__sym_size_snprintf,
.se_width_idx = HISTC_SYM_SIZE,
};
/* --sort dso_size */
static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
return size_l < size_r ? -1 :
size_l == size_r ? 0 : 1;
}
static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}
static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
size_t bf_size, unsigned int width)
{
if (map && map__dso(map))
return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}
static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}
struct sort_entry sort_dso_size = {
.se_header = "DSO size",
.se_cmp = sort__dso_size_cmp,
.se_snprintf = hist_entry__dso_size_snprintf,
.se_width_idx = HISTC_DSO_SIZE,
};
/* --sort addr */
static int64_t
sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
{
u64 left_ip = left->ip;
u64 right_ip = right->ip;
struct map *left_map = left->ms.map;
struct map *right_map = right->ms.map;
if (left_map)
left_ip = map__unmap_ip(left_map, left_ip);
if (right_map)
right_ip = map__unmap_ip(right_map, right_ip);
return _sort__addr_cmp(left_ip, right_ip);
}
static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
u64 ip = he->ip;
struct map *map = he->ms.map;
if (map)
ip = map__unmap_ip(map, ip);
return repsep_snprintf(bf, size, "%-#*llx", width, ip);
}
struct sort_entry sort_addr = {
.se_header = "Address",
.se_cmp = sort__addr_cmp,
.se_snprintf = hist_entry__addr_snprintf,
.se_width_idx = HISTC_ADDR,
};
struct sort_dimension {
const char *name;
struct sort_entry *entry;
int taken;
};
int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
{
return 0;
}
const char * __weak arch_perf_header_entry(const char *se_header)
{
return se_header;
}
static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
{
sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
}
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_PID, "pid", sort_thread),
DIM(SORT_COMM, "comm", sort_comm),
DIM(SORT_DSO, "dso", sort_dso),
DIM(SORT_SYM, "symbol", sort_sym),
DIM(SORT_PARENT, "parent", sort_parent),
DIM(SORT_CPU, "cpu", sort_cpu),
DIM(SORT_SOCKET, "socket", sort_socket),
DIM(SORT_SRCLINE, "srcline", sort_srcline),
DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
DIM(SORT_TRANSACTION, "transaction", sort_transaction),
#ifdef HAVE_LIBTRACEEVENT
DIM(SORT_TRACE, "trace", sort_trace),
#endif
DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
DIM(SORT_CGROUP, "cgroup", sort_cgroup),
DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
DIM(SORT_TIME, "time", sort_time),
DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
DIM(SORT_ADDR, "addr", sort_addr),
DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
DIM(SORT_SIMD, "simd", sort_simd)
};
#undef DIM
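/*
 * For illustration, each DIM() entry above expands to a designated
 * initializer indexed by its sort type, e.g.:
 *
 *	[SORT_PID] = { .name = "pid", .entry = &(sort_thread) },
 *
 * which is what lets a --sort key be looked up by name and mapped to its
 * struct sort_entry.
 */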
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
static struct sort_dimension bstack_sort_dimensions[] = {
DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
DIM(SORT_IN_TX, "in_tx", sort_in_tx),
DIM(SORT_ABORT, "abort", sort_abort),
DIM(SORT_CYCLES, "cycles", sort_cycles),
DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
};
#undef DIM
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
static struct sort_dimension memory_sort_dimensions[] = {
DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
};
#undef DIM
struct hpp_dimension {
const char *name;
struct perf_hpp_fmt *fmt;
int taken;
};
#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
static struct hpp_dimension hpp_sort_dimensions[] = {
DIM(PERF_HPP__OVERHEAD, "overhead"),
DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
DIM(PERF_HPP__SAMPLES, "sample"),
DIM(PERF_HPP__PERIOD, "period"),
};
#undef DIM
struct hpp_sort_entry {
struct perf_hpp_fmt hpp;
struct sort_entry *se;
};
void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
struct hpp_sort_entry *hse;
if (!perf_hpp__is_sort_entry(fmt))
return;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}
static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hists *hists, int line __maybe_unused,
int *span __maybe_unused)
{
struct hpp_sort_entry *hse;
size_t len = fmt->user_len;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
if (!len)
len = hists__col_len(hists, hse->se->se_width_idx);
return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}
static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct hists *hists)
{
struct hpp_sort_entry *hse;
size_t len = fmt->user_len;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
if (!len)
len = hists__col_len(hists, hse->se->se_width_idx);
return len;
}
static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct hpp_sort_entry *hse;
size_t len = fmt->user_len;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
if (!len)
len = hists__col_len(he->hists, hse->se->se_width_idx);
return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}
static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct hpp_sort_entry *hse;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
return hse->se->se_cmp(a, b);
}
static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct hpp_sort_entry *hse;
int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
hse = container_of(fmt, struct hpp_sort_entry, hpp);
collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
return collapse_fn(a, b);
}
static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct hpp_sort_entry *hse;
int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
hse = container_of(fmt, struct hpp_sort_entry, hpp);
sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
return sort_fn(a, b);
}
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
return format->header == __sort__hpp_header;
}
#define MK_SORT_ENTRY_CHK(key) \
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
{ \
struct hpp_sort_entry *hse; \
\
if (!perf_hpp__is_sort_entry(fmt)) \
return false; \
\
hse = container_of(fmt, struct hpp_sort_entry, hpp); \
return hse->se == &sort_ ## key ; \
}
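/*
 * For illustration, MK_SORT_ENTRY_CHK(dso) below expands to:
 *
 *	bool perf_hpp__is_dso_entry(struct perf_hpp_fmt *fmt)
 *	{
 *		struct hpp_sort_entry *hse;
 *
 *		if (!perf_hpp__is_sort_entry(fmt))
 *			return false;
 *
 *		hse = container_of(fmt, struct hpp_sort_entry, hpp);
 *		return hse->se == &sort_dso;
 *	}
 */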
#ifdef HAVE_LIBTRACEEVENT
MK_SORT_ENTRY_CHK(trace)
#else
bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
{
return false;
}
#endif
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)
static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
struct hpp_sort_entry *hse_a;
struct hpp_sort_entry *hse_b;
if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
return false;
hse_a = container_of(a, struct hpp_sort_entry, hpp);
hse_b = container_of(b, struct hpp_sort_entry, hpp);
return hse_a->se == hse_b->se;
}
static void hse_free(struct perf_hpp_fmt *fmt)
{
struct hpp_sort_entry *hse;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
free(hse);
}
static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
{
struct hpp_sort_entry *hse;
if (!perf_hpp__is_sort_entry(fmt))
return;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
if (hse->se->se_init)
hse->se->se_init(he);
}
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
struct hpp_sort_entry *hse;
hse = malloc(sizeof(*hse));
if (hse == NULL) {
pr_err("Memory allocation failed\n");
return NULL;
}
hse->se = sd->entry;
hse->hpp.name = sd->entry->se_header;
hse->hpp.header = __sort__hpp_header;
hse->hpp.width = __sort__hpp_width;
hse->hpp.entry = __sort__hpp_entry;
hse->hpp.color = NULL;
hse->hpp.cmp = __sort__hpp_cmp;
hse->hpp.collapse = __sort__hpp_collapse;
hse->hpp.sort = __sort__hpp_sort;
hse->hpp.equal = __sort__hpp_equal;
hse->hpp.free = hse_free;
hse->hpp.init = hse_init;
INIT_LIST_HEAD(&hse->hpp.list);
INIT_LIST_HEAD(&hse->hpp.sort_list);
hse->hpp.elide = false;
hse->hpp.len = 0;
hse->hpp.user_len = 0;
hse->hpp.level = level;
return hse;
}
static void hpp_free(struct perf_hpp_fmt *fmt)
{
free(fmt);
}
static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
int level)
{
struct perf_hpp_fmt *fmt;
fmt = memdup(hd->fmt, sizeof(*fmt));
if (fmt) {
INIT_LIST_HEAD(&fmt->list);
INIT_LIST_HEAD(&fmt->sort_list);
fmt->free = hpp_free;
fmt->level = level;
}
return fmt;
}
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
struct perf_hpp_fmt *fmt;
struct hpp_sort_entry *hse;
int ret = -1;
int r;
perf_hpp_list__for_each_format(he->hpp_list, fmt) {
if (!perf_hpp__is_sort_entry(fmt))
continue;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
if (hse->se->se_filter == NULL)
continue;
		/*
		 * A hist entry is filtered if any of the sort keys in the hpp
		 * list applies its filter, but filter types that don't match
		 * the sort key must be skipped.
		 */
r = hse->se->se_filter(he, type, arg);
if (r >= 0) {
if (ret < 0)
ret = 0;
ret |= r;
}
}
return ret;
}
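/*
 * For illustration: with "--sort dso,symbol" and a DSO filter applied,
 * hist_entry__dso_filter() returns 0 for entries in the filtered DSO and 1
 * for the rest, while hist_entry__sym_filter() returns -1 because the filter
 * type is HIST_FILTER__DSO and is skipped; the loop above then yields 0
 * (keep the entry) only when every applicable filter matched.
 */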
static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
struct perf_hpp_list *list,
int level)
{
struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
if (hse == NULL)
return -1;
perf_hpp_list__register_sort_field(list, &hse->hpp);
return 0;
}
static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
struct perf_hpp_list *list)
{
struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
if (hse == NULL)
return -1;
perf_hpp_list__column_register(list, &hse->hpp);
return 0;
}
#ifndef HAVE_LIBTRACEEVENT
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
{
return false;
}
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
struct hists *hists __maybe_unused)
{
return false;
}
#else
struct hpp_dynamic_entry {
struct perf_hpp_fmt hpp;
struct evsel *evsel;
struct tep_format_field *field;
unsigned dynamic_len;
bool raw_trace;
};
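/*
* Column width for a dynamic (tracepoint field) entry: the widest of the
* field name, the raw field width and the longest value seen so far.
*/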
static int hde_width(struct hpp_dynamic_entry *hde)
{
if (!hde->hpp.len) {
int len = hde->dynamic_len;
int namelen = strlen(hde->field->name);
int fieldlen = hde->field->size;
if (namelen > len)
len = namelen;
if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
/* length for printing hex numbers */
fieldlen = hde->field->size * 2 + 2;
}
if (fieldlen > len)
len = fieldlen;
hde->hpp.len = len;
}
return hde->hpp.len;
}
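/*
* Parse the pretty-printed trace output of the entry and grow the dynamic
* column width to fit this field's printed value.
*/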
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
struct hist_entry *he)
{
char *str, *pos;
struct tep_format_field *field = hde->field;
size_t namelen;
bool last = false;
if (hde->raw_trace)
return;
/* parse pretty print result and update max length */
if (!he->trace_output)
he->trace_output = get_trace_output(he);
namelen = strlen(field->name);
str = he->trace_output;
while (str) {
pos = strchr(str, ' ');
if (pos == NULL) {
last = true;
pos = str + strlen(str);
}
if (!strncmp(str, field->name, namelen)) {
size_t len;
str += namelen + 1;
len = pos - str;
if (len > hde->dynamic_len)
hde->dynamic_len = len;
break;
}
if (last)
str = NULL;
else
str = pos + 1;
}
}
static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hists *hists __maybe_unused,
int line __maybe_unused,
int *span __maybe_unused)
{
struct hpp_dynamic_entry *hde;
size_t len = fmt->user_len;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
if (!len)
len = hde_width(hde);
return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}
static int __sort__hde_width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct hists *hists __maybe_unused)
{
struct hpp_dynamic_entry *hde;
size_t len = fmt->user_len;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
if (!len)
len = hde_width(hde);
return len;
}
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
struct hpp_dynamic_entry *hde;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
return hists_to_evsel(hists) == hde->evsel;
}
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct hpp_dynamic_entry *hde;
size_t len = fmt->user_len;
char *str, *pos;
struct tep_format_field *field;
size_t namelen;
bool last = false;
int ret;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
if (!len)
len = hde_width(hde);
if (hde->raw_trace)
goto raw_field;
if (!he->trace_output)
he->trace_output = get_trace_output(he);
field = hde->field;
namelen = strlen(field->name);
str = he->trace_output;
while (str) {
pos = strchr(str, ' ');
if (pos == NULL) {
last = true;
pos = str + strlen(str);
}
if (!strncmp(str, field->name, namelen)) {
str += namelen + 1;
str = strndup(str, pos - str);
if (str == NULL)
return scnprintf(hpp->buf, hpp->size,
"%*.*s", len, len, "ERROR");
break;
}
if (last)
str = NULL;
else
str = pos + 1;
}
if (str == NULL) {
struct trace_seq seq;
raw_field:
trace_seq_init(&seq);
tep_print_field(&seq, he->raw_data, hde->field);
str = seq.buffer;
}
ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
free(str);
return ret;
}
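/*
* Compare the raw tracepoint data of two hist entries for this field.
* Dynamic fields encode their offset and size in the recorded data word.
*/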
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct hpp_dynamic_entry *hde;
struct tep_format_field *field;
unsigned offset, size;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
field = hde->field;
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
unsigned long long dyn;
tep_read_number_field(field, a->raw_data, &dyn);
offset = dyn & 0xffff;
size = (dyn >> 16) & 0xffff;
if (tep_field_is_relative(field->flags))
offset += field->offset + field->size;
/* record max width for output */
if (size > hde->dynamic_len)
hde->dynamic_len = size;
} else {
offset = field->offset;
size = field->size;
}
return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
return fmt->cmp == __sort__hde_cmp;
}
static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
struct hpp_dynamic_entry *hde_a;
struct hpp_dynamic_entry *hde_b;
if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
return false;
hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
return hde_a->field == hde_b->field;
}
static void hde_free(struct perf_hpp_fmt *fmt)
{
struct hpp_dynamic_entry *hde;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
free(hde);
}
static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
{
struct hpp_dynamic_entry *hde;
if (!perf_hpp__is_dynamic_entry(fmt))
return;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
update_dynamic_len(hde, he);
}
static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
int level)
{
struct hpp_dynamic_entry *hde;
hde = malloc(sizeof(*hde));
if (hde == NULL) {
pr_debug("Memory allocation failed\n");
return NULL;
}
hde->evsel = evsel;
hde->field = field;
hde->dynamic_len = 0;
hde->hpp.name = field->name;
hde->hpp.header = __sort__hde_header;
hde->hpp.width = __sort__hde_width;
hde->hpp.entry = __sort__hde_entry;
hde->hpp.color = NULL;
hde->hpp.init = __sort__hde_init;
hde->hpp.cmp = __sort__hde_cmp;
hde->hpp.collapse = __sort__hde_cmp;
hde->hpp.sort = __sort__hde_cmp;
hde->hpp.equal = __sort__hde_equal;
hde->hpp.free = hde_free;
INIT_LIST_HEAD(&hde->hpp.list);
INIT_LIST_HEAD(&hde->hpp.sort_list);
hde->hpp.elide = false;
hde->hpp.len = 0;
hde->hpp.user_len = 0;
hde->hpp.level = level;
return hde;
}
#endif /* HAVE_LIBTRACEEVENT */
struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
struct perf_hpp_fmt *new_fmt = NULL;
if (perf_hpp__is_sort_entry(fmt)) {
struct hpp_sort_entry *hse, *new_hse;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
new_hse = memdup(hse, sizeof(*hse));
if (new_hse)
new_fmt = &new_hse->hpp;
#ifdef HAVE_LIBTRACEEVENT
} else if (perf_hpp__is_dynamic_entry(fmt)) {
struct hpp_dynamic_entry *hde, *new_hde;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
new_hde = memdup(hde, sizeof(*hde));
if (new_hde)
new_fmt = &new_hde->hpp;
#endif
} else {
new_fmt = memdup(fmt, sizeof(*fmt));
}
if (new_fmt == NULL) /* memdup() failed above */
return NULL;
INIT_LIST_HEAD(&new_fmt->list);
INIT_LIST_HEAD(&new_fmt->sort_list);
return new_fmt;
}
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
char *event_name, *field_name, *opt_name;
event_name = str;
field_name = strchr(str, '.');
if (field_name) {
*field_name++ = '\0';
} else {
event_name = NULL;
field_name = str;
}
opt_name = strchr(field_name, '/');
if (opt_name)
*opt_name++ = '\0';
*event = event_name;
*field = field_name;
*opt = opt_name;
return 0;
}
/* Find a matching evsel using a given event name. The event name can be:
* 1. '%' + event index (e.g. '%1' for first event)
* 2. full event name (e.g. sched:sched_switch)
* 3. partial event name (should not contain ':')
*/
static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
{
struct evsel *evsel = NULL;
struct evsel *pos;
bool full_name;
/* case 1 */
if (event_name[0] == '%') {
int nr = strtol(event_name+1, NULL, 0);
if (nr > evlist->core.nr_entries)
return NULL;
evsel = evlist__first(evlist);
while (--nr > 0)
evsel = evsel__next(evsel);
return evsel;
}
full_name = !!strchr(event_name, ':');
evlist__for_each_entry(evlist, pos) {
/* case 2 */
if (full_name && evsel__name_is(pos, event_name))
return pos;
/* case 3 */
if (!full_name && strstr(pos->name, event_name)) {
if (evsel) {
pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
event_name, evsel->name, pos->name);
return NULL;
}
evsel = pos;
}
}
return evsel;
}
#ifdef HAVE_LIBTRACEEVENT
static int __dynamic_dimension__add(struct evsel *evsel,
struct tep_format_field *field,
bool raw_trace, int level)
{
struct hpp_dynamic_entry *hde;
hde = __alloc_dynamic_entry(evsel, field, level);
if (hde == NULL)
return -ENOMEM;
hde->raw_trace = raw_trace;
perf_hpp__register_sort_field(&hde->hpp);
return 0;
}
static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
int ret;
struct tep_format_field *field;
field = evsel->tp_format->format.fields;
while (field) {
ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
if (ret < 0)
return ret;
field = field->next;
}
return 0;
}
static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
int level)
{
int ret;
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
continue;
ret = add_evsel_fields(evsel, raw_trace, level);
if (ret < 0)
return ret;
}
return 0;
}
static int add_all_matching_fields(struct evlist *evlist,
char *field_name, bool raw_trace, int level)
{
int ret = -ESRCH;
struct evsel *evsel;
struct tep_format_field *field;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
continue;
field = tep_find_any_field(evsel->tp_format, field_name);
if (field == NULL)
continue;
ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
if (ret < 0)
break;
}
return ret;
}
#endif /* HAVE_LIBTRACEEVENT */
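/*
* Add a dynamic sort key of the form [<event>.]<field>[/raw], e.g.
* 'sched:sched_switch.prev_comm'; '<event>.*' adds every field of that
* event and 'trace_fields' adds the fields of all tracepoint events.
*/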
static int add_dynamic_entry(struct evlist *evlist, const char *tok,
int level)
{
char *str, *event_name, *field_name, *opt_name;
struct evsel *evsel;
bool raw_trace = symbol_conf.raw_trace;
int ret = 0;
if (evlist == NULL)
return -ENOENT;
str = strdup(tok);
if (str == NULL)
return -ENOMEM;
if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
ret = -EINVAL;
goto out;
}
if (opt_name) {
if (strcmp(opt_name, "raw")) {
pr_debug("unsupported field option %s\n", opt_name);
ret = -EINVAL;
goto out;
}
raw_trace = true;
}
#ifdef HAVE_LIBTRACEEVENT
if (!strcmp(field_name, "trace_fields")) {
ret = add_all_dynamic_fields(evlist, raw_trace, level);
goto out;
}
if (event_name == NULL) {
ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
goto out;
}
#else
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
ret = -ENOTSUP;
}
}
if (ret) {
pr_err("\n");
goto out;
}
#endif
evsel = find_evsel(evlist, event_name);
if (evsel == NULL) {
pr_debug("Cannot find event: %s\n", event_name);
ret = -ENOENT;
goto out;
}
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
pr_debug("%s is not a tracepoint event\n", event_name);
ret = -EINVAL;
goto out;
}
#ifdef HAVE_LIBTRACEEVENT
if (!strcmp(field_name, "*")) {
ret = add_evsel_fields(evsel, raw_trace, level);
} else {
struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
if (field == NULL) {
pr_debug("Cannot find event field for %s.%s\n",
event_name, field_name);
ret = -ENOENT;
goto out;
}
ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
}
#else
(void)level;
(void)raw_trace;
#endif /* HAVE_LIBTRACEEVENT */
out:
free(str);
return ret;
}
static int __sort_dimension__add(struct sort_dimension *sd,
struct perf_hpp_list *list,
int level)
{
if (sd->taken)
return 0;
if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
return -1;
if (sd->entry->se_collapse)
list->need_collapse = 1;
sd->taken = 1;
return 0;
}
static int __hpp_dimension__add(struct hpp_dimension *hd,
struct perf_hpp_list *list,
int level)
{
struct perf_hpp_fmt *fmt;
if (hd->taken)
return 0;
fmt = __hpp_dimension__alloc_hpp(hd, level);
if (!fmt)
return -1;
hd->taken = 1;
perf_hpp_list__register_sort_field(list, fmt);
return 0;
}
static int __sort_dimension__add_output(struct perf_hpp_list *list,
struct sort_dimension *sd)
{
if (sd->taken)
return 0;
if (__sort_dimension__add_hpp_output(sd, list) < 0)
return -1;
sd->taken = 1;
return 0;
}
static int __hpp_dimension__add_output(struct perf_hpp_list *list,
struct hpp_dimension *hd)
{
struct perf_hpp_fmt *fmt;
if (hd->taken)
return 0;
fmt = __hpp_dimension__alloc_hpp(hd, 0);
if (!fmt)
return -1;
hd->taken = 1;
perf_hpp_list__column_register(list, fmt);
return 0;
}
int hpp_dimension__add_output(unsigned col)
{
BUG_ON(col >= PERF_HPP__MAX_INDEX);
return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
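/*
* Look up a single --sort token in the common, hpp, branch stack and memory
* dimension tables and register it at the given hierarchy level.
*/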
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
struct evlist *evlist,
int level)
{
unsigned int i, j;
/*
* Check to see if there are any arch specific
* sort dimensions not applicable for the current
* architecture. If so, skip that sort key since
* we don't want to display it in the output fields.
*/
for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
if (!strcmp(arch_specific_sort_keys[j], tok) &&
!arch_support_sort_key(tok)) {
return 0;
}
}
for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
struct sort_dimension *sd = &common_sort_dimensions[i];
if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
continue;
for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
if (sd->name && !strcmp(dynamic_headers[j], sd->name))
sort_dimension_add_dynamic_header(sd);
}
if (sd->entry == &sort_parent) {
int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
if (ret) {
char err[BUFSIZ];
regerror(ret, &parent_regex, err, sizeof(err));
pr_err("Invalid regex: %s\n%s", parent_pattern, err);
return -EINVAL;
}
list->parent = 1;
} else if (sd->entry == &sort_sym) {
list->sym = 1;
/*
* perf diff displays the performance difference amongst
* two or more perf.data files. Those files could come
* from different binaries. So we should not compare
* their ips, but the symbol names.
*/
if (sort__mode == SORT_MODE__DIFF)
sd->entry->se_collapse = sort__sym_sort;
} else if (sd->entry == &sort_dso) {
list->dso = 1;
} else if (sd->entry == &sort_socket) {
list->socket = 1;
} else if (sd->entry == &sort_thread) {
list->thread = 1;
} else if (sd->entry == &sort_comm) {
list->comm = 1;
}
return __sort_dimension__add(sd, list, level);
}
for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
struct hpp_dimension *hd = &hpp_sort_dimensions[i];
if (strncasecmp(tok, hd->name, strlen(tok)))
continue;
return __hpp_dimension__add(hd, list, level);
}
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
struct sort_dimension *sd = &bstack_sort_dimensions[i];
if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
continue;
if (sort__mode != SORT_MODE__BRANCH)
return -EINVAL;
if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
list->sym = 1;
__sort_dimension__add(sd, list, level);
return 0;
}
for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
struct sort_dimension *sd = &memory_sort_dimensions[i];
if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
continue;
if (sort__mode != SORT_MODE__MEMORY)
return -EINVAL;
if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
return -EINVAL;
if (sd->entry == &sort_mem_daddr_sym)
list->sym = 1;
__sort_dimension__add(sd, list, level);
return 0;
}
if (!add_dynamic_entry(evlist, tok, level))
return 0;
return -ESRCH;
}
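/*
* Split the sort key string on ',', ' ' and the '{' '}' group markers;
* keys inside a brace group share the same hierarchy level.
*/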
static int setup_sort_list(struct perf_hpp_list *list, char *str,
struct evlist *evlist)
{
char *tmp, *tok;
int ret = 0;
int level = 0;
int next_level = 1;
bool in_group = false;
do {
tok = str;
tmp = strpbrk(str, "{}, ");
if (tmp) {
if (in_group)
next_level = level;
else
next_level = level + 1;
if (*tmp == '{')
in_group = true;
else if (*tmp == '}')
in_group = false;
*tmp = '\0';
str = tmp + 1;
}
if (*tok) {
ret = sort_dimension__add(list, tok, evlist, level);
if (ret == -EINVAL) {
if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
else
ui__error("Invalid --sort key: `%s'", tok);
break;
} else if (ret == -ESRCH) {
ui__error("Unknown --sort key: `%s'", tok);
break;
}
}
level = next_level;
} while (tmp);
return ret;
}
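/*
* Pick the default sort order for the current mode; if every event is a
* tracepoint, switch to SORT_MODE__TRACEPOINT.
*/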
static const char *get_default_sort_order(struct evlist *evlist)
{
const char *default_sort_orders[] = {
default_sort_order,
default_branch_sort_order,
default_mem_sort_order,
default_top_sort_order,
default_diff_sort_order,
default_tracepoint_sort_order,
};
bool use_trace = true;
struct evsel *evsel;
BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
if (evlist == NULL || evlist__empty(evlist))
goto out_no_evlist;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
use_trace = false;
break;
}
}
if (use_trace) {
sort__mode = SORT_MODE__TRACEPOINT;
if (symbol_conf.raw_trace)
return "trace_fields";
}
out_no_evlist:
return default_sort_orders[sort__mode];
}
static int setup_sort_order(struct evlist *evlist)
{
char *new_sort_order;
/*
* Append '+'-prefixed sort order to the default sort
* order string.
*/
if (!sort_order || is_strict_order(sort_order))
return 0;
if (sort_order[1] == '\0') {
ui__error("Invalid --sort key: `+'");
return -EINVAL;
}
/*
* We allocate a new sort_order string, but we never free it,
* because it is referenced throughout the rest of the code.
*/
if (asprintf(&new_sort_order, "%s,%s",
get_default_sort_order(evlist), sort_order + 1) < 0) {
pr_err("Not enough memory to set up --sort");
return -ENOMEM;
}
sort_order = new_sort_order;
return 0;
}
/*
* Adds 'pre,' prefix into 'str' if 'pre' is
* not already part of 'str'.
*/
static char *prefix_if_not_in(const char *pre, char *str)
{
char *n;
if (!str || strstr(str, pre))
return str;
if (asprintf(&n, "%s,%s", pre, str) < 0)
n = NULL;
free(str);
return n;
}
static char *setup_overhead(char *keys)
{
if (sort__mode == SORT_MODE__DIFF)
return keys;
keys = prefix_if_not_in("overhead", keys);
if (symbol_conf.cumulate_callchain)
keys = prefix_if_not_in("overhead_children", keys);
return keys;
}
static int __setup_sorting(struct evlist *evlist)
{
char *str;
const char *sort_keys;
int ret = 0;
ret = setup_sort_order(evlist);
if (ret)
return ret;
sort_keys = sort_order;
if (sort_keys == NULL) {
if (is_strict_order(field_order)) {
/*
* If user specified field order but no sort order,
* we'll honor it and not add default sort orders.
*/
return 0;
}
sort_keys = get_default_sort_order(evlist);
}
str = strdup(sort_keys);
if (str == NULL) {
pr_err("Not enough memory to setup sort keys");
return -ENOMEM;
}
/*
* Prepend overhead fields for backward compatibility.
*/
if (!is_strict_order(field_order)) {
str = setup_overhead(str);
if (str == NULL) {
pr_err("Not enough memory to setup overhead keys");
return -ENOMEM;
}
}
ret = setup_sort_list(&perf_hpp_list, str, evlist);
free(str);
return ret;
}
void perf_hpp__set_elide(int idx, bool elide)
{
struct perf_hpp_fmt *fmt;
struct hpp_sort_entry *hse;
perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
if (!perf_hpp__is_sort_entry(fmt))
continue;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
if (hse->se->se_width_idx == idx) {
fmt->elide = elide;
break;
}
}
}
static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
if (list && strlist__nr_entries(list) == 1) {
if (fp != NULL)
fprintf(fp, "# %s: %s\n", list_name,
strlist__entry(list, 0)->s);
return true;
}
return false;
}
static bool get_elide(int idx, FILE *output)
{
switch (idx) {
case HISTC_SYMBOL:
return __get_elide(symbol_conf.sym_list, "symbol", output);
case HISTC_DSO:
return __get_elide(symbol_conf.dso_list, "dso", output);
case HISTC_COMM:
return __get_elide(symbol_conf.comm_list, "comm", output);
default:
break;
}
if (sort__mode != SORT_MODE__BRANCH)
return false;
switch (idx) {
case HISTC_SYMBOL_FROM:
return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
case HISTC_SYMBOL_TO:
return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
case HISTC_DSO_FROM:
return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
case HISTC_DSO_TO:
return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
case HISTC_ADDR_FROM:
return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
case HISTC_ADDR_TO:
return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
default:
break;
}
return false;
}
void sort__setup_elide(FILE *output)
{
struct perf_hpp_fmt *fmt;
struct hpp_sort_entry *hse;
perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
if (!perf_hpp__is_sort_entry(fmt))
continue;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
fmt->elide = get_elide(hse->se->se_width_idx, output);
}
/*
* It makes no sense to elide all of the sort entries.
* Just revert them to show up again.
*/
perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
if (!perf_hpp__is_sort_entry(fmt))
continue;
if (!fmt->elide)
return;
}
perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
if (!perf_hpp__is_sort_entry(fmt))
continue;
fmt->elide = false;
}
}
int output_field_add(struct perf_hpp_list *list, char *tok)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
struct sort_dimension *sd = &common_sort_dimensions[i];
if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
continue;
return __sort_dimension__add_output(list, sd);
}
for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
struct hpp_dimension *hd = &hpp_sort_dimensions[i];
if (strncasecmp(tok, hd->name, strlen(tok)))
continue;
return __hpp_dimension__add_output(list, hd);
}
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
struct sort_dimension *sd = &bstack_sort_dimensions[i];
if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
continue;
if (sort__mode != SORT_MODE__BRANCH)
return -EINVAL;
return __sort_dimension__add_output(list, sd);
}
for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
struct sort_dimension *sd = &memory_sort_dimensions[i];
if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
continue;
if (sort__mode != SORT_MODE__MEMORY)
return -EINVAL;
return __sort_dimension__add_output(list, sd);
}
return -ESRCH;
}
static int setup_output_list(struct perf_hpp_list *list, char *str)
{
char *tmp, *tok;
int ret = 0;
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
ret = output_field_add(list, tok);
if (ret == -EINVAL) {
ui__error("Invalid --fields key: `%s'", tok);
break;
} else if (ret == -ESRCH) {
ui__error("Unknown --fields key: `%s'", tok);
break;
}
}
return ret;
}
void reset_dimensions(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
common_sort_dimensions[i].taken = 0;
for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
hpp_sort_dimensions[i].taken = 0;
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
bstack_sort_dimensions[i].taken = 0;
for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
memory_sort_dimensions[i].taken = 0;
}
bool is_strict_order(const char *order)
{
return order && (*order != '+');
}
static int __setup_output_field(void)
{
char *str, *strp;
int ret = -EINVAL;
if (field_order == NULL)
return 0;
strp = str = strdup(field_order);
if (str == NULL) {
pr_err("Not enough memory to setup output fields");
return -ENOMEM;
}
if (!is_strict_order(field_order))
strp++;
if (!strlen(strp)) {
ui__error("Invalid --fields key: `+'");
goto out;
}
ret = setup_output_list(&perf_hpp_list, strp);
out:
free(str);
return ret;
}
int setup_sorting(struct evlist *evlist)
{
int err;
err = __setup_sorting(evlist);
if (err < 0)
return err;
if (parent_pattern != default_parent_pattern) {
err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
if (err < 0)
return err;
}
reset_dimensions();
/*
* perf diff doesn't use default hpp output fields.
*/
if (sort__mode != SORT_MODE__DIFF)
perf_hpp__init();
err = __setup_output_field();
if (err < 0)
return err;
/* copy sort keys to output fields */
perf_hpp__setup_output_field(&perf_hpp_list);
/* and then copy output fields to sort keys */
perf_hpp__append_sort_keys(&perf_hpp_list);
/* setup hists-specific output fields */
if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
return -1;
return 0;
}
void reset_output_field(void)
{
perf_hpp_list.need_collapse = 0;
perf_hpp_list.parent = 0;
perf_hpp_list.sym = 0;
perf_hpp_list.dso = 0;
field_order = NULL;
sort_order = NULL;
reset_dimensions();
perf_hpp__reset_output_field(&perf_hpp_list);
}
#define INDENT (3*8 + 1)
static void add_key(struct strbuf *sb, const char *str, int *llen)
{
if (!str)
return;
if (*llen >= 75) {
strbuf_addstr(sb, "\n\t\t\t ");
*llen = INDENT;
}
strbuf_addf(sb, " %s", str);
*llen += strlen(str) + 1;
}
static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
int *llen)
{
int i;
for (i = 0; i < n; i++)
add_key(sb, s[i].name, llen);
}
static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
int *llen)
{
int i;
for (i = 0; i < n; i++)
add_key(sb, s[i].name, llen);
}
char *sort_help(const char *prefix)
{
struct strbuf sb;
char *s;
int len = strlen(prefix) + INDENT;
strbuf_init(&sb, 300);
strbuf_addstr(&sb, prefix);
add_hpp_sort_string(&sb, hpp_sort_dimensions,
ARRAY_SIZE(hpp_sort_dimensions), &len);
add_sort_string(&sb, common_sort_dimensions,
ARRAY_SIZE(common_sort_dimensions), &len);
add_sort_string(&sb, bstack_sort_dimensions,
ARRAY_SIZE(bstack_sort_dimensions), &len);
add_sort_string(&sb, memory_sort_dimensions,
ARRAY_SIZE(memory_sort_dimensions), &len);
s = strbuf_detach(&sb, NULL);
strbuf_release(&sb);
return s;
}
| linux-master | tools/perf/util/sort.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* svghelper.c - helper functions for outputting svg
*
* (C) Copyright 2009 Intel Corporation
*
* Authors:
* Arjan van de Ven <[email protected]>
*/
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <linux/bitmap.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "env.h"
#include "svghelper.h"
static u64 first_time, last_time;
static u64 turbo_frequency, max_freq;
#define SLOT_MULT 30.0
#define SLOT_HEIGHT 25.0
#define SLOT_HALF (SLOT_HEIGHT / 2)
int svg_page_width = 1000;
u64 svg_highlight;
const char *svg_highlight_name;
#define MIN_TEXT_SIZE 0.01
static u64 total_height;
static FILE *svgfile;
static double cpu2slot(int cpu)
{
return 2 * cpu + 1;
}
static int *topology_map;
static double cpu2y(int cpu)
{
if (topology_map)
return cpu2slot(topology_map[cpu]) * SLOT_MULT;
else
return cpu2slot(cpu) * SLOT_MULT;
}
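/* Map a timestamp to an X coordinate, scaled to the SVG page width. */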
static double time2pixels(u64 __time)
{
double X;
X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time);
return X;
}
/*
* Round text sizes so that the svg viewer only needs a discrete
* number of renderings of the font
*/
static double round_text_size(double size)
{
int loop = 100;
double target = 10.0;
if (size >= 10.0)
return size;
while (loop--) {
if (size >= target)
return target;
target = target / 2.0;
}
return size;
}
void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
{
int new_width;
svgfile = fopen(filename, "w");
if (!svgfile) {
fprintf(stderr, "Cannot open %s for output\n", filename);
return;
}
first_time = start;
first_time = first_time / 100000000 * 100000000;
last_time = end;
/*
* if the recording is short, we default to a width of 1000, but
* for longer recordings we want at least 200 units of width per second
*/
new_width = (last_time - first_time) / 5000000;
if (new_width > svg_page_width)
svg_page_width = new_width;
total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
fprintf(svgfile, "<!DOCTYPE svg SYSTEM \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n");
fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
fprintf(svgfile, "<defs>\n <style type=\"text/css\">\n <![CDATA[\n");
fprintf(svgfile, " rect { stroke-width: 1; }\n");
fprintf(svgfile, " rect.process { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:1; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.process3 { fill:rgb(180,180,180); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.sample { fill:rgb( 0, 0,255); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.sample_hi{ fill:rgb(255,128, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.error { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.net { fill:rgb( 0,128, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.disk { fill:rgb( 0, 0,255); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.sync { fill:rgb(128,128, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.poll { fill:rgb( 0,128,128); fill-opacity:0.2; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.blocked { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.waiting { fill:rgb(224,214, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.WAITING { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.cpu { fill:rgb(192,192,192); fill-opacity:0.2; stroke-width:0.5; stroke:rgb(128,128,128); } \n");
fprintf(svgfile, " rect.pstate { fill:rgb(128,128,128); fill-opacity:0.8; stroke-width:0; } \n");
fprintf(svgfile, " rect.c1 { fill:rgb(255,214,214); fill-opacity:0.5; stroke-width:0; } \n");
fprintf(svgfile, " rect.c2 { fill:rgb(255,172,172); fill-opacity:0.5; stroke-width:0; } \n");
fprintf(svgfile, " rect.c3 { fill:rgb(255,130,130); fill-opacity:0.5; stroke-width:0; } \n");
fprintf(svgfile, " rect.c4 { fill:rgb(255, 88, 88); fill-opacity:0.5; stroke-width:0; } \n");
fprintf(svgfile, " rect.c5 { fill:rgb(255, 44, 44); fill-opacity:0.5; stroke-width:0; } \n");
fprintf(svgfile, " rect.c6 { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; } \n");
fprintf(svgfile, " line.pstate { stroke:rgb(255,255, 0); stroke-opacity:0.8; stroke-width:2; } \n");
fprintf(svgfile, " ]]>\n </style>\n</defs>\n");
}
static double normalize_height(double height)
{
if (height < 0.25)
return 0.25;
else if (height < 0.50)
return 0.50;
else if (height < 0.75)
return 0.75;
else
return 1.0;
}
void svg_ubox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges)
{
double w = time2pixels(end) - time2pixels(start);
height = normalize_height(height);
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
fprintf(svgfile, "<title>fd=%d error=%d merges=%d</title>\n", fd, err, merges);
fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
time2pixels(start),
w,
Yslot * SLOT_MULT,
SLOT_HALF * height,
type);
fprintf(svgfile, "</g>\n");
}
void svg_lbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges)
{
double w = time2pixels(end) - time2pixels(start);
height = normalize_height(height);
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
fprintf(svgfile, "<title>fd=%d error=%d merges=%d</title>\n", fd, err, merges);
fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
time2pixels(start),
w,
Yslot * SLOT_MULT + SLOT_HEIGHT - SLOT_HALF * height,
SLOT_HALF * height,
type);
fprintf(svgfile, "</g>\n");
}
void svg_fbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges)
{
double w = time2pixels(end) - time2pixels(start);
height = normalize_height(height);
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
fprintf(svgfile, "<title>fd=%d error=%d merges=%d</title>\n", fd, err, merges);
fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
time2pixels(start),
w,
Yslot * SLOT_MULT + SLOT_HEIGHT - SLOT_HEIGHT * height,
SLOT_HEIGHT * height,
type);
fprintf(svgfile, "</g>\n");
}
void svg_box(int Yslot, u64 start, u64 end, const char *type)
{
if (!svgfile)
return;
fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type);
}
static char *time_to_string(u64 duration);
void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
{
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
fprintf(svgfile, "<title>#%d blocked %s</title>\n", cpu,
time_to_string(end - start));
if (backtrace)
fprintf(svgfile, "<desc>Blocked on:\n%s</desc>\n", backtrace);
svg_box(Yslot, start, end, "blocked");
fprintf(svgfile, "</g>\n");
}
void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
{
double text_size;
const char *type;
if (!svgfile)
return;
if (svg_highlight && end - start > svg_highlight)
type = "sample_hi";
else
type = "sample";
fprintf(svgfile, "<g>\n");
fprintf(svgfile, "<title>#%d running %s</title>\n",
cpu, time_to_string(end - start));
if (backtrace)
fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace);
fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT,
type);
text_size = (time2pixels(end)-time2pixels(start));
if (cpu > 9)
text_size = text_size/2;
if (text_size > 1.25)
text_size = 1.25;
text_size = round_text_size(text_size);
if (text_size > MIN_TEXT_SIZE)
fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\" font-size=\"%.8fpt\">%i</text>\n",
time2pixels(start), Yslot * SLOT_MULT + SLOT_HEIGHT - 1, text_size, cpu + 1);
fprintf(svgfile, "</g>\n");
}
static char *time_to_string(u64 duration)
{
static char text[80];
text[0] = 0;
if (duration < NSEC_PER_USEC) /* less than 1 usec */
return text;
if (duration < NSEC_PER_MSEC) { /* less than 1 msec */
sprintf(text, "%.1f us", duration / (double)NSEC_PER_USEC);
return text;
}
sprintf(text, "%.1f ms", duration / (double)NSEC_PER_MSEC);
return text;
}
void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
{
char *text;
const char *style;
double font_size;
if (!svgfile)
return;
style = "waiting";
if (end-start > 10 * NSEC_PER_MSEC) /* 10 msec */
style = "WAITING";
text = time_to_string(end-start);
font_size = 1.0 * (time2pixels(end)-time2pixels(start));
if (font_size > 3)
font_size = 3;
font_size = round_text_size(font_size);
fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT);
fprintf(svgfile, "<title>#%d waiting %s</title>\n", cpu, time_to_string(end - start));
if (backtrace)
fprintf(svgfile, "<desc>Waiting on:\n%s</desc>\n", backtrace);
fprintf(svgfile, "<rect x=\"0\" width=\"%.8f\" y=\"0\" height=\"%.1f\" class=\"%s\"/>\n",
time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style);
if (font_size > MIN_TEXT_SIZE)
fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%.8fpt\"> %s</text>\n",
font_size, text);
fprintf(svgfile, "</g>\n");
}
static char *cpu_model(void)
{
static char cpu_m[255];
char buf[256];
FILE *file;
cpu_m[0] = 0;
/* CPU type */
file = fopen("/proc/cpuinfo", "r");
if (file) {
while (fgets(buf, 255, file)) {
if (strcasestr(buf, "model name")) {
strlcpy(cpu_m, &buf[13], 255);
break;
}
}
fclose(file);
}
/* Max CPU frequency */
file = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", "r");
if (file) {
while (fgets(buf, 255, file)) {
unsigned int freq;
freq = strtoull(buf, NULL, 10);
if (freq > max_freq)
max_freq = freq;
}
fclose(file);
}
return cpu_m;
}
void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq)
{
char cpu_string[80];
if (!svgfile)
return;
max_freq = __max_freq;
turbo_frequency = __turbo_freq;
fprintf(svgfile, "<g>\n");
fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"cpu\"/>\n",
time2pixels(first_time),
time2pixels(last_time)-time2pixels(first_time),
cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
sprintf(cpu_string, "CPU %i", (int)cpu);
fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\">%s</text>\n",
10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string);
fprintf(svgfile, "<text transform=\"translate(%.8f,%.8f)\" font-size=\"1.25pt\">%s</text>\n",
10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model());
fprintf(svgfile, "</g>\n");
}
void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace)
{
double width;
const char *type;
if (!svgfile)
return;
if (svg_highlight && end - start >= svg_highlight)
type = "sample_hi";
else if (svg_highlight_name && strstr(name, svg_highlight_name))
type = "sample_hi";
else
type = "sample";
fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\">\n", time2pixels(start), cpu2y(cpu));
fprintf(svgfile, "<title>%d %s running %s</title>\n", pid, name, time_to_string(end - start));
if (backtrace)
fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace);
fprintf(svgfile, "<rect x=\"0\" width=\"%.8f\" y=\"0\" height=\"%.1f\" class=\"%s\"/>\n",
time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type);
width = time2pixels(end)-time2pixels(start);
if (width > 6)
width = 6;
width = round_text_size(width);
if (width > MIN_TEXT_SIZE)
fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%.8fpt\">%s</text>\n",
width, name);
fprintf(svgfile, "</g>\n");
}
void svg_cstate(int cpu, u64 start, u64 end, int type)
{
double width;
char style[128];
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
if (type > 6)
type = 6;
sprintf(style, "c%i", type);
fprintf(svgfile, "<rect class=\"%s\" x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\"/>\n",
style,
time2pixels(start), time2pixels(end)-time2pixels(start),
cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
width = (time2pixels(end)-time2pixels(start))/2.0;
if (width > 6)
width = 6;
width = round_text_size(width);
if (width > MIN_TEXT_SIZE)
fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\" font-size=\"%.8fpt\">C%i</text>\n",
time2pixels(start), cpu2y(cpu)+width, width, type);
fprintf(svgfile, "</g>\n");
}
static char *HzToHuman(unsigned long hz)
{
static char buffer[1024];
unsigned long long Hz;
memset(buffer, 0, 1024);
Hz = hz;
/* default: just put the Number in */
sprintf(buffer, "%9lli", Hz);
if (Hz > 1000)
sprintf(buffer, " %6lli Mhz", (Hz+500)/1000);
if (Hz > 1500000)
sprintf(buffer, " %6.2f Ghz", (Hz+5000.0)/1000000);
if (Hz == turbo_frequency)
sprintf(buffer, "Turbo");
return buffer;
}
void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
{
double height = 0;
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
if (max_freq)
height = freq * 1.0 / max_freq * (SLOT_HEIGHT + SLOT_MULT);
height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height;
fprintf(svgfile, "<line x1=\"%.8f\" x2=\"%.8f\" y1=\"%.1f\" y2=\"%.1f\" class=\"pstate\"/>\n",
time2pixels(start), time2pixels(end), height, height);
fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\" font-size=\"0.25pt\">%s</text>\n",
time2pixels(start), height+0.9, HzToHuman(freq));
fprintf(svgfile, "</g>\n");
}
void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace)
{
double height;
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
fprintf(svgfile, "<title>%s wakes up %s</title>\n",
desc1 ? desc1 : "?",
desc2 ? desc2 : "?");
if (backtrace)
fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
if (row1 < row2) {
if (row1) {
fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
if (desc2)
fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s ></text></g>\n",
time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_HEIGHT/48, desc2);
}
if (row2) {
fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row2 * SLOT_MULT);
if (desc1)
fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s ></text></g>\n",
time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, desc1);
}
} else {
if (row2) {
fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
if (desc1)
fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s <</text></g>\n",
time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/48, desc1);
}
if (row1) {
fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
time2pixels(start), row1 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row1 * SLOT_MULT);
if (desc2)
fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s <</text></g>\n",
time2pixels(start), row1 * SLOT_MULT - SLOT_HEIGHT/32, desc2);
}
}
height = row1 * SLOT_MULT;
if (row2 > row1)
height += SLOT_HEIGHT;
if (row1)
fprintf(svgfile, "<circle cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
time2pixels(start), height);
fprintf(svgfile, "</g>\n");
}
void svg_wakeline(u64 start, int row1, int row2, const char *backtrace)
{
double height;
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
if (backtrace)
fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
if (row1 < row2)
fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT);
else
fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT);
height = row1 * SLOT_MULT;
if (row2 > row1)
height += SLOT_HEIGHT;
fprintf(svgfile, "<circle cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
time2pixels(start), height);
fprintf(svgfile, "</g>\n");
}
void svg_interrupt(u64 start, int row, const char *backtrace)
{
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
fprintf(svgfile, "<title>Wakeup from interrupt</title>\n");
if (backtrace)
fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
fprintf(svgfile, "<circle cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
time2pixels(start), row * SLOT_MULT);
fprintf(svgfile, "<circle cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT);
fprintf(svgfile, "</g>\n");
}
void svg_text(int Yslot, u64 start, const char *text)
{
if (!svgfile)
return;
fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\">%s</text>\n",
time2pixels(start), Yslot * SLOT_MULT+SLOT_HEIGHT/2, text);
}
static void svg_legenda_box(int X, const char *text, const char *style)
{
double boxsize;
boxsize = SLOT_HEIGHT / 2;
fprintf(svgfile, "<rect x=\"%i\" width=\"%.8f\" y=\"0\" height=\"%.1f\" class=\"%s\"/>\n",
X, boxsize, boxsize, style);
fprintf(svgfile, "<text transform=\"translate(%.8f, %.8f)\" font-size=\"%.8fpt\">%s</text>\n",
X + boxsize + 5, boxsize, 0.8 * boxsize, text);
}
void svg_io_legenda(void)
{
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
svg_legenda_box(0, "Disk", "disk");
svg_legenda_box(100, "Network", "net");
svg_legenda_box(200, "Sync", "sync");
svg_legenda_box(300, "Poll", "poll");
svg_legenda_box(400, "Error", "error");
fprintf(svgfile, "</g>\n");
}
void svg_legenda(void)
{
if (!svgfile)
return;
fprintf(svgfile, "<g>\n");
svg_legenda_box(0, "Running", "sample");
svg_legenda_box(100, "Idle","c1");
svg_legenda_box(200, "Deeper Idle", "c3");
svg_legenda_box(350, "Deepest Idle", "c6");
svg_legenda_box(550, "Sleeping", "process2");
svg_legenda_box(650, "Waiting for cpu", "waiting");
svg_legenda_box(800, "Blocked on IO", "blocked");
fprintf(svgfile, "</g>\n");
}
void svg_time_grid(double min_thickness)
{
u64 i;
if (!svgfile)
return;
i = first_time;
while (i < last_time) {
int color = 220;
double thickness = 0.075;
if ((i % 100000000) == 0) {
thickness = 0.5;
color = 192;
}
if ((i % 1000000000) == 0) {
thickness = 2.0;
color = 128;
}
if (thickness >= min_thickness)
fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%.3f\"/>\n",
time2pixels(i), SLOT_MULT/2, time2pixels(i),
total_height, color, color, color, thickness);
i += 10000000;
}
}
void svg_close(void)
{
if (svgfile) {
fprintf(svgfile, "</svg>\n");
fclose(svgfile);
svgfile = NULL;
}
}
#define cpumask_bits(maskp) ((maskp)->bits)
typedef struct { DECLARE_BITMAP(bits, MAX_NR_CPUS); } cpumask_t;
struct topology {
cpumask_t *sib_core;
int sib_core_nr;
cpumask_t *sib_thr;
int sib_thr_nr;
};
static void scan_thread_topology(int *map, struct topology *t, int cpu,
int *pos, int nr_cpus)
{
int i;
int thr;
for (i = 0; i < t->sib_thr_nr; i++) {
if (!test_bit(cpu, cpumask_bits(&t->sib_thr[i])))
continue;
for_each_set_bit(thr, cpumask_bits(&t->sib_thr[i]), nr_cpus)
if (map[thr] == -1)
map[thr] = (*pos)++;
}
}
static void scan_core_topology(int *map, struct topology *t, int nr_cpus)
{
int pos = 0;
int i;
int cpu;
for (i = 0; i < t->sib_core_nr; i++)
for_each_set_bit(cpu, cpumask_bits(&t->sib_core[i]), nr_cpus)
scan_thread_topology(map, t, cpu, &pos, nr_cpus);
}
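/* Parse a CPU list string (e.g. "0-3,8") into a cpumask, rejecting CPUs >= nr_cpus. */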
static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
{
int i;
int ret = 0;
struct perf_cpu_map *m;
struct perf_cpu c;
m = perf_cpu_map__new(s);
if (!m)
return -1;
for (i = 0; i < perf_cpu_map__nr(m); i++) {
c = perf_cpu_map__cpu(m, i);
if (c.cpu >= nr_cpus) {
ret = -1;
break;
}
__set_bit(c.cpu, cpumask_bits(b));
}
perf_cpu_map__put(m);
return ret;
}
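/*
* Build topology_map[] so that CPUs sharing a core (and their SMT siblings)
* end up on adjacent rows of the SVG.
*/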
int svg_build_topology_map(struct perf_env *env)
{
int i, nr_cpus;
struct topology t;
char *sib_core, *sib_thr;
nr_cpus = min(env->nr_cpus_online, MAX_NR_CPUS);
t.sib_core_nr = env->nr_sibling_cores;
t.sib_thr_nr = env->nr_sibling_threads;
t.sib_core = calloc(env->nr_sibling_cores, sizeof(cpumask_t));
t.sib_thr = calloc(env->nr_sibling_threads, sizeof(cpumask_t));
sib_core = env->sibling_cores;
sib_thr = env->sibling_threads;
if (!t.sib_core || !t.sib_thr) {
fprintf(stderr, "topology: no memory\n");
goto exit;
}
for (i = 0; i < env->nr_sibling_cores; i++) {
if (str_to_bitmap(sib_core, &t.sib_core[i], nr_cpus)) {
fprintf(stderr, "topology: can't parse siblings map\n");
goto exit;
}
sib_core += strlen(sib_core) + 1;
}
for (i = 0; i < env->nr_sibling_threads; i++) {
if (str_to_bitmap(sib_thr, &t.sib_thr[i], nr_cpus)) {
fprintf(stderr, "topology: can't parse siblings map\n");
goto exit;
}
sib_thr += strlen(sib_thr) + 1;
}
topology_map = malloc(sizeof(int) * nr_cpus);
if (!topology_map) {
fprintf(stderr, "topology: no memory\n");
goto exit;
}
for (i = 0; i < nr_cpus; i++)
topology_map[i] = -1;
scan_core_topology(topology_map, &t, nr_cpus);
return 0;
exit:
zfree(&t.sib_core);
zfree(&t.sib_thr);
return -1;
}
| linux-master | tools/perf/util/svghelper.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* call-path.h: Manipulate a tree data structure containing function call paths
* Copyright (c) 2014, Intel Corporation.
*/
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include "call-path.h"
static void call_path__init(struct call_path *cp, struct call_path *parent,
struct symbol *sym, u64 ip, bool in_kernel)
{
cp->parent = parent;
cp->sym = sym;
cp->ip = sym ? 0 : ip;
cp->db_id = 0;
cp->in_kernel = in_kernel;
RB_CLEAR_NODE(&cp->rb_node);
cp->children = RB_ROOT;
}
struct call_path_root *call_path_root__new(void)
{
struct call_path_root *cpr;
cpr = zalloc(sizeof(struct call_path_root));
if (!cpr)
return NULL;
call_path__init(&cpr->call_path, NULL, NULL, 0, false);
INIT_LIST_HEAD(&cpr->blocks);
return cpr;
}
void call_path_root__free(struct call_path_root *cpr)
{
struct call_path_block *pos, *n;
list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
list_del_init(&pos->node);
free(pos);
}
free(cpr);
}
static struct call_path *call_path__new(struct call_path_root *cpr,
struct call_path *parent,
struct symbol *sym, u64 ip,
bool in_kernel)
{
struct call_path_block *cpb;
struct call_path *cp;
size_t n;
if (cpr->next < cpr->sz) {
cpb = list_last_entry(&cpr->blocks, struct call_path_block,
node);
} else {
cpb = zalloc(sizeof(struct call_path_block));
if (!cpb)
return NULL;
list_add_tail(&cpb->node, &cpr->blocks);
cpr->sz += CALL_PATH_BLOCK_SIZE;
}
n = cpr->next++ & CALL_PATH_BLOCK_MASK;
cp = &cpb->cp[n];
call_path__init(cp, parent, sym, ip, in_kernel);
return cp;
}
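/*
* Find the child of 'parent' matching (sym, ip), or allocate a new call
* path node and insert it into the parent's rbtree of children.
*/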
struct call_path *call_path__findnew(struct call_path_root *cpr,
struct call_path *parent,
struct symbol *sym, u64 ip, u64 ks)
{
struct rb_node **p;
struct rb_node *node_parent = NULL;
struct call_path *cp;
bool in_kernel = ip >= ks;
if (sym)
ip = 0;
if (!parent)
return call_path__new(cpr, parent, sym, ip, in_kernel);
p = &parent->children.rb_node;
while (*p != NULL) {
node_parent = *p;
cp = rb_entry(node_parent, struct call_path, rb_node);
if (cp->sym == sym && cp->ip == ip)
return cp;
if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
cp = call_path__new(cpr, parent, sym, ip, in_kernel);
if (!cp)
return NULL;
rb_link_node(&cp->rb_node, node_parent, p);
rb_insert_color(&cp->rb_node, &parent->children);
return cp;
}
| linux-master | tools/perf/util/call-path.c |
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "map_symbol.h"
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
#include "pmu.h"
#include "pmus.h"
unsigned int perf_mem_events__loads_ldlat = 30;
#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
E("ldlat-loads", "cpu/mem-loads,ldlat=%u/P", "cpu/events/mem-loads"),
E("ldlat-stores", "cpu/mem-stores/P", "cpu/events/mem-stores"),
E(NULL, NULL, NULL),
};
#undef E
static char mem_loads_name[100];
static bool mem_loads_name__init;
struct perf_mem_event * __weak perf_mem_events__ptr(int i)
{
if (i >= PERF_MEM_EVENTS__MAX)
return NULL;
return &perf_mem_events[i];
}
const char * __weak perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
if (!e)
return NULL;
if (i == PERF_MEM_EVENTS__LOAD) {
if (!mem_loads_name__init) {
mem_loads_name__init = true;
scnprintf(mem_loads_name, sizeof(mem_loads_name),
e->name, perf_mem_events__loads_ldlat);
}
return mem_loads_name;
}
return e->name;
}
__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
{
return false;
}
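/*
* Parse a comma-separated list of mem event tags (e.g. "ldlat-loads,
* ldlat-stores") and mark the matching events for recording.
*/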
int perf_mem_events__parse(const char *str)
{
char *tok, *saveptr = NULL;
bool found = false;
char *buf;
int j;
/* We need a buffer that we know we can write to. */
buf = malloc(strlen(str) + 1);
if (!buf)
return -ENOMEM;
strcpy(buf, str);
tok = strtok_r((char *)buf, ",", &saveptr);
while (tok) {
for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
struct perf_mem_event *e = perf_mem_events__ptr(j);
if (!e->tag)
continue;
if (strstr(e->tag, tok))
e->record = found = true;
}
tok = strtok_r(NULL, ",", &saveptr);
}
free(buf);
if (found)
return 0;
pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
return -1;
}
static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
{
char path[PATH_MAX];
struct stat st;
scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
return !stat(path, &st);
}
int perf_mem_events__init(void)
{
const char *mnt = sysfs__mount();
bool found = false;
int j;
if (!mnt)
return -ENOENT;
for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
struct perf_mem_event *e = perf_mem_events__ptr(j);
char sysfs_name[100];
struct perf_pmu *pmu = NULL;
/*
* If the event entry isn't valid, skip initialization
* and "e->supported" will keep false.
*/
if (!e->tag)
continue;
/*
* Scan all PMUs, not just core ones, since perf mem/c2c on
* platforms like AMD uses the IBS OP PMU, which is independent
* of the core PMU.
*/
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
e->supported |= perf_mem_event__supported(mnt, sysfs_name);
}
if (e->supported)
found = true;
}
return found ? 0 : -ENOENT;
}
void perf_mem_events__list(void)
{
int j;
for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
struct perf_mem_event *e = perf_mem_events__ptr(j);
fprintf(stderr, "%-*s%-*s%s",
e->tag ? 13 : 0,
e->tag ? : "",
e->tag && verbose > 0 ? 25 : 0,
e->tag && verbose > 0 ? perf_mem_events__name(j, NULL) : "",
e->supported ? ": available\n" : "");
}
}
static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
int idx)
{
const char *mnt = sysfs__mount();
char sysfs_name[100];
struct perf_pmu *pmu = NULL;
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
pmu->name);
if (!perf_mem_event__supported(mnt, sysfs_name)) {
pr_err("failed: event '%s' not supported\n",
perf_mem_events__name(idx, pmu->name));
}
}
}
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
char **rec_tmp, int *tmp_nr)
{
int i = *argv_nr, k = 0;
struct perf_mem_event *e;
struct perf_pmu *pmu = NULL; /* start perf_pmus__scan() from the beginning */
for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
e = perf_mem_events__ptr(j);
if (!e->record)
continue;
if (perf_pmus__num_mem_pmus() == 1) {
if (!e->supported) {
pr_err("failed: event '%s' not supported\n",
perf_mem_events__name(j, NULL));
return -1;
}
rec_argv[i++] = "-e";
rec_argv[i++] = perf_mem_events__name(j, NULL);
} else {
if (!e->supported) {
perf_mem_events__print_unsupport_hybrid(e, j);
return -1;
}
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
const char *s = perf_mem_events__name(j, pmu->name);
rec_argv[i++] = "-e";
if (s) {
char *copy = strdup(s);
if (!copy)
return -1;
rec_argv[i++] = copy;
rec_tmp[k++] = copy;
}
}
}
}
*argv_nr = i;
*tmp_nr = k;
return 0;
}
static const char * const tlb_access[] = {
"N/A",
"HIT",
"MISS",
"L1",
"L2",
"Walker",
"Fault",
};
int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
size_t l = 0, i;
u64 m = PERF_MEM_TLB_NA;
u64 hit, miss;
sz -= 1; /* -1 for null termination */
out[0] = '\0';
if (mem_info)
m = mem_info->data_src.mem_dtlb;
hit = m & PERF_MEM_TLB_HIT;
miss = m & PERF_MEM_TLB_MISS;
/* already taken care of */
m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
if (!(m & 0x1))
continue;
if (l) {
strcat(out, " or ");
l += 4;
}
l += scnprintf(out + l, sz - l, tlb_access[i]);
}
if (*out == '\0')
l += scnprintf(out, sz - l, "N/A");
if (hit)
l += scnprintf(out + l, sz - l, " hit");
if (miss)
l += scnprintf(out + l, sz - l, " miss");
return l;
}
static const char * const mem_lvl[] = {
"N/A",
"HIT",
"MISS",
"L1",
"LFB/MAB",
"L2",
"L3",
"Local RAM",
"Remote RAM (1 hop)",
"Remote RAM (2 hops)",
"Remote Cache (1 hop)",
"Remote Cache (2 hops)",
"I/O",
"Uncached",
};
static const char * const mem_lvlnum[] = {
[PERF_MEM_LVLNUM_UNC] = "Uncached",
[PERF_MEM_LVLNUM_CXL] = "CXL",
[PERF_MEM_LVLNUM_IO] = "I/O",
[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
[PERF_MEM_LVLNUM_RAM] = "RAM",
[PERF_MEM_LVLNUM_PMEM] = "PMEM",
[PERF_MEM_LVLNUM_NA] = "N/A",
};
static const char * const mem_hops[] = {
"N/A",
/*
* While printing, 'Remote' will be prepended to represent
* 'Remote core, same node' accesses, since the remote field needs
* to be set along with the mem_hops field.
*/
"core, same node",
"node, same socket",
"socket, same board",
"board",
};
static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
u64 op = PERF_MEM_LOCK_NA;
int l;
if (mem_info)
op = mem_info->data_src.mem_op;
if (op & PERF_MEM_OP_NA)
l = scnprintf(out, sz, "N/A");
else if (op & PERF_MEM_OP_LOAD)
l = scnprintf(out, sz, "LOAD");
else if (op & PERF_MEM_OP_STORE)
l = scnprintf(out, sz, "STORE");
else if (op & PERF_MEM_OP_PFETCH)
l = scnprintf(out, sz, "PFETCH");
else if (op & PERF_MEM_OP_EXEC)
l = scnprintf(out, sz, "EXEC");
else
l = scnprintf(out, sz, "No");
return l;
}
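/*
* Format the memory level: prefer the newer mem_lvl_num encoding (with
* remote/hops qualifiers) and fall back to the legacy mem_lvl bitmask.
*/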
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
union perf_mem_data_src data_src;
int printed = 0;
size_t l = 0;
size_t i;
int lvl;
char hit_miss[5] = {0};
sz -= 1; /* -1 for null termination */
out[0] = '\0';
if (!mem_info)
goto na;
data_src = mem_info->data_src;
if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
memcpy(hit_miss, "hit", 3);
else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
memcpy(hit_miss, "miss", 4);
lvl = data_src.mem_lvl_num;
if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
if (data_src.mem_remote) {
strcat(out, "Remote ");
l += 7;
}
if (data_src.mem_hops)
l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);
if (mem_lvlnum[lvl])
l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
else
l += scnprintf(out + l, sz - l, "L%d", lvl);
l += scnprintf(out + l, sz - l, " %s", hit_miss);
return l;
}
lvl = data_src.mem_lvl;
if (!lvl)
goto na;
lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
if (!lvl)
goto na;
for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
if (!(lvl & 0x1))
continue;
if (printed++) {
strcat(out, " or ");
l += 4;
}
l += scnprintf(out + l, sz - l, mem_lvl[i]);
}
if (printed) {
l += scnprintf(out + l, sz - l, " %s", hit_miss);
return l;
}
na:
strcat(out, "N/A");
return 3;
}
static const char * const snoop_access[] = {
"N/A",
"None",
"Hit",
"Miss",
"HitM",
};
static const char * const snoopx_access[] = {
"Fwd",
"Peer",
};
int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
size_t i, l = 0;
u64 m = PERF_MEM_SNOOP_NA;
sz -= 1; /* -1 for null termination */
out[0] = '\0';
if (mem_info)
m = mem_info->data_src.mem_snoop;
for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
if (!(m & 0x1))
continue;
if (l) {
strcat(out, " or ");
l += 4;
}
l += scnprintf(out + l, sz - l, snoop_access[i]);
}
m = 0;
if (mem_info)
m = mem_info->data_src.mem_snoopx;
for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
if (!(m & 0x1))
continue;
if (l) {
strcat(out, " or ");
l += 4;
}
l += scnprintf(out + l, sz - l, snoopx_access[i]);
}
if (*out == '\0')
l += scnprintf(out, sz - l, "N/A");
return l;
}
int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
u64 mask = PERF_MEM_LOCK_NA;
int l;
if (mem_info)
mask = mem_info->data_src.mem_lock;
if (mask & PERF_MEM_LOCK_NA)
l = scnprintf(out, sz, "N/A");
else if (mask & PERF_MEM_LOCK_LOCKED)
l = scnprintf(out, sz, "Yes");
else
l = scnprintf(out, sz, "No");
return l;
}
int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
size_t l = 0;
u64 mask = PERF_MEM_BLK_NA;
sz -= 1; /* -1 for null termination */
out[0] = '\0';
if (mem_info)
mask = mem_info->data_src.mem_blk;
if (!mask || (mask & PERF_MEM_BLK_NA)) {
l += scnprintf(out + l, sz - l, " N/A");
return l;
}
if (mask & PERF_MEM_BLK_DATA)
l += scnprintf(out + l, sz - l, " Data");
if (mask & PERF_MEM_BLK_ADDR)
l += scnprintf(out + l, sz - l, " Addr");
return l;
}
int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
int i = 0;
i += scnprintf(out, sz, "|OP ");
i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
i += scnprintf(out + i, sz - i, "|LVL ");
i += perf_mem__lvl_scnprintf(out + i, sz, mem_info);
i += scnprintf(out + i, sz - i, "|SNP ");
i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
i += scnprintf(out + i, sz - i, "|TLB ");
i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
i += scnprintf(out + i, sz - i, "|LCK ");
i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
i += scnprintf(out + i, sz - i, "|BLK ");
i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);
return i;
}
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
union perf_mem_data_src *data_src = &mi->data_src;
u64 daddr = mi->daddr.addr;
u64 op = data_src->mem_op;
u64 lvl = data_src->mem_lvl;
u64 snoop = data_src->mem_snoop;
u64 snoopx = data_src->mem_snoopx;
u64 lock = data_src->mem_lock;
u64 blk = data_src->mem_blk;
/*
* Skylake might report unknown remote level via this
* bit, consider it when evaluating remote HITMs.
*
* In case of Power, the remote field can also be used to denote cache
* accesses from another core of the same node. Hence, set
* mrem only when HOPS is zero and the remote field is set.
*/
bool mrem = (data_src->mem_remote && !data_src->mem_hops);
int err = 0;
#define HITM_INC(__f) \
do { \
stats->__f++; \
stats->tot_hitm++; \
} while (0)
#define PEER_INC(__f) \
do { \
stats->__f++; \
stats->tot_peer++; \
} while (0)
#define P(a, b) PERF_MEM_##a##_##b
stats->nr_entries++;
if (lock & P(LOCK, LOCKED)) stats->locks++;
if (blk & P(BLK, DATA)) stats->blk_data++;
if (blk & P(BLK, ADDR)) stats->blk_addr++;
if (op & P(OP, LOAD)) {
/* load */
stats->load++;
if (!daddr) {
stats->ld_noadrs++;
return -1;
}
if (lvl & P(LVL, HIT)) {
if (lvl & P(LVL, UNC)) stats->ld_uncache++;
if (lvl & P(LVL, IO)) stats->ld_io++;
if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
if (lvl & P(LVL, L2)) {
stats->ld_l2hit++;
if (snoopx & P(SNOOPX, PEER))
PEER_INC(lcl_peer);
}
if (lvl & P(LVL, L3 )) {
if (snoop & P(SNOOP, HITM))
HITM_INC(lcl_hitm);
else
stats->ld_llchit++;
if (snoopx & P(SNOOPX, PEER))
PEER_INC(lcl_peer);
}
if (lvl & P(LVL, LOC_RAM)) {
stats->lcl_dram++;
if (snoop & P(SNOOP, HIT))
stats->ld_shared++;
else
stats->ld_excl++;
}
if ((lvl & P(LVL, REM_RAM1)) ||
(lvl & P(LVL, REM_RAM2)) ||
mrem) {
stats->rmt_dram++;
if (snoop & P(SNOOP, HIT))
stats->ld_shared++;
else
stats->ld_excl++;
}
}
if ((lvl & P(LVL, REM_CCE1)) ||
(lvl & P(LVL, REM_CCE2)) ||
mrem) {
if (snoop & P(SNOOP, HIT)) {
stats->rmt_hit++;
} else if (snoop & P(SNOOP, HITM)) {
HITM_INC(rmt_hitm);
} else if (snoopx & P(SNOOPX, PEER)) {
stats->rmt_hit++;
PEER_INC(rmt_peer);
}
}
if ((lvl & P(LVL, MISS)))
stats->ld_miss++;
} else if (op & P(OP, STORE)) {
/* store */
stats->store++;
if (!daddr) {
stats->st_noadrs++;
return -1;
}
if (lvl & P(LVL, HIT)) {
if (lvl & P(LVL, UNC)) stats->st_uncache++;
if (lvl & P(LVL, L1 )) stats->st_l1hit++;
}
if (lvl & P(LVL, MISS))
if (lvl & P(LVL, L1)) stats->st_l1miss++;
if (lvl & P(LVL, NA))
stats->st_na++;
} else {
/* unparsable data_src? */
stats->noparse++;
return -1;
}
if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
stats->nomap++;
return -1;
}
#undef P
#undef HITM_INC
return err;
}
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
stats->nr_entries += add->nr_entries;
stats->locks += add->locks;
stats->store += add->store;
stats->st_uncache += add->st_uncache;
stats->st_noadrs += add->st_noadrs;
stats->st_l1hit += add->st_l1hit;
stats->st_l1miss += add->st_l1miss;
stats->st_na += add->st_na;
stats->load += add->load;
stats->ld_excl += add->ld_excl;
stats->ld_shared += add->ld_shared;
stats->ld_uncache += add->ld_uncache;
stats->ld_io += add->ld_io;
stats->ld_miss += add->ld_miss;
stats->ld_noadrs += add->ld_noadrs;
stats->ld_fbhit += add->ld_fbhit;
stats->ld_l1hit += add->ld_l1hit;
stats->ld_l2hit += add->ld_l2hit;
stats->ld_llchit += add->ld_llchit;
stats->lcl_hitm += add->lcl_hitm;
stats->rmt_hitm += add->rmt_hitm;
stats->tot_hitm += add->tot_hitm;
stats->lcl_peer += add->lcl_peer;
stats->rmt_peer += add->rmt_peer;
stats->tot_peer += add->tot_peer;
stats->rmt_hit += add->rmt_hit;
stats->lcl_dram += add->lcl_dram;
stats->rmt_dram += add->rmt_dram;
stats->blk_data += add->blk_data;
stats->blk_addr += add->blk_addr;
stats->nomap += add->nomap;
stats->noparse += add->noparse;
}
| linux-master | tools/perf/util/mem-events.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (c) 2009 Arnaldo Carvalho de Melo <[email protected]>
*/
#include "strlist.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/zalloc.h>
static
struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry)
{
const char *s = entry;
struct rb_node *rc = NULL;
struct strlist *strlist = container_of(rblist, struct strlist, rblist);
struct str_node *snode = malloc(sizeof(*snode));
if (snode != NULL) {
if (strlist->dupstr) {
s = strdup(s);
if (s == NULL)
goto out_delete;
}
snode->s = s;
rc = &snode->rb_node;
}
return rc;
out_delete:
free(snode);
return NULL;
}
static void str_node__delete(struct str_node *snode, bool dupstr)
{
if (dupstr)
zfree((char **)&snode->s);
free(snode);
}
static
void strlist__node_delete(struct rblist *rblist, struct rb_node *rb_node)
{
struct strlist *slist = container_of(rblist, struct strlist, rblist);
struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
str_node__delete(snode, slist->dupstr);
}
static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
{
const char *str = entry;
struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
return strcmp(snode->s, str);
}
int strlist__add(struct strlist *slist, const char *new_entry)
{
return rblist__add_node(&slist->rblist, new_entry);
}
int strlist__load(struct strlist *slist, const char *filename)
{
char entry[1024];
int err;
FILE *fp = fopen(filename, "r");
if (fp == NULL)
return -errno;
while (fgets(entry, sizeof(entry), fp) != NULL) {
const size_t len = strlen(entry);
if (len == 0)
continue;
entry[len - 1] = '\0';
err = strlist__add(slist, entry);
if (err != 0)
goto out;
}
err = 0;
out:
fclose(fp);
return err;
}
void strlist__remove(struct strlist *slist, struct str_node *snode)
{
rblist__remove_node(&slist->rblist, &snode->rb_node);
}
struct str_node *strlist__find(struct strlist *slist, const char *entry)
{
struct str_node *snode = NULL;
struct rb_node *rb_node = rblist__find(&slist->rblist, entry);
if (rb_node)
snode = container_of(rb_node, struct str_node, rb_node);
return snode;
}
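/*
* A list specification is a comma separated set of entries. An entry of the
* form "file://<path>" is loaded line by line from <path>. If a substitution
* directory was configured and "<subst_dir>/<entry>" exists, that file is
* loaded instead; otherwise the entry itself is added as a string (unless the
* list was created as file_only).
*/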
static int strlist__parse_list_entry(struct strlist *slist, const char *s,
const char *subst_dir)
{
int err;
char *subst = NULL;
if (strncmp(s, "file://", 7) == 0)
return strlist__load(slist, s + 7);
if (subst_dir) {
err = -ENOMEM;
if (asprintf(&subst, "%s/%s", subst_dir, s) < 0)
goto out;
if (access(subst, F_OK) == 0) {
err = strlist__load(slist, subst);
goto out;
}
if (slist->file_only) {
err = -ENOENT;
goto out;
}
}
err = strlist__add(slist, s);
out:
free(subst);
return err;
}
static int strlist__parse_list(struct strlist *slist, const char *s, const char *subst_dir)
{
char *sep;
int err;
while ((sep = strchr(s, ',')) != NULL) {
*sep = '\0';
err = strlist__parse_list_entry(slist, s, subst_dir);
*sep = ',';
if (err != 0)
return err;
s = sep + 1;
}
return *s ? strlist__parse_list_entry(slist, s, subst_dir) : 0;
}
struct strlist *strlist__new(const char *list, const struct strlist_config *config)
{
struct strlist *slist = malloc(sizeof(*slist));
if (slist != NULL) {
bool dupstr = true;
bool file_only = false;
const char *dirname = NULL;
if (config) {
dupstr = !config->dont_dupstr;
dirname = config->dirname;
file_only = config->file_only;
}
rblist__init(&slist->rblist);
slist->rblist.node_cmp = strlist__node_cmp;
slist->rblist.node_new = strlist__node_new;
slist->rblist.node_delete = strlist__node_delete;
slist->dupstr = dupstr;
slist->file_only = file_only;
if (list && strlist__parse_list(slist, list, dirname) != 0)
goto out_error;
}
return slist;
out_error:
free(slist);
return NULL;
}
void strlist__delete(struct strlist *slist)
{
if (slist != NULL)
rblist__delete(&slist->rblist);
}
struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
{
struct str_node *snode = NULL;
struct rb_node *rb_node;
rb_node = rblist__entry(&slist->rblist, idx);
if (rb_node)
snode = container_of(rb_node, struct str_node, rb_node);
return snode;
}
| linux-master | tools/perf/util/strlist.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <stdio.h>
#include <stdbool.h>
#include "util/evlist.h"
#include "evsel.h"
#include "util/evsel_fprintf.h"
#include "util/event.h"
#include "callchain.h"
#include "map.h"
#include "strlist.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
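/*
* Emit a separator followed by the formatted text: ':' on the first call
* (which also clears *first), ',' on every call after that.
*/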
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
va_list args;
int ret = 0;
if (!*first) {
ret += fprintf(fp, ",");
} else {
ret += fprintf(fp, ":");
*first = false;
}
va_start(args, fmt);
ret += vfprintf(fp, fmt, args);
va_end(args);
return ret;
}
static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
{
return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
}
int evsel__fprintf(struct evsel *evsel, struct perf_attr_details *details, FILE *fp)
{
bool first = true;
int printed = 0;
if (details->event_group) {
struct evsel *pos;
if (!evsel__is_group_leader(evsel))
return 0;
if (evsel->core.nr_members > 1)
printed += fprintf(fp, "%s{", evsel->group_name ?: "");
printed += fprintf(fp, "%s", evsel__name(evsel));
for_each_group_member(pos, evsel)
printed += fprintf(fp, ",%s", evsel__name(pos));
if (evsel->core.nr_members > 1)
printed += fprintf(fp, "}");
goto out;
}
printed += fprintf(fp, "%s", evsel__name(evsel));
if (details->verbose) {
printed += perf_event_attr__fprintf(fp, &evsel->core.attr,
__print_attr__fprintf, &first);
} else if (details->freq) {
const char *term = "sample_freq";
if (!evsel->core.attr.freq)
term = "sample_period";
printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
term, (u64)evsel->core.attr.sample_freq);
}
#ifdef HAVE_LIBTRACEEVENT
if (details->trace_fields) {
struct tep_format_field *field;
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
printed += comma_fprintf(fp, &first, " (not a tracepoint)");
goto out;
}
field = evsel->tp_format->format.fields;
if (field == NULL) {
printed += comma_fprintf(fp, &first, " (no trace field)");
goto out;
}
printed += comma_fprintf(fp, &first, " trace_fields: %s", field->name);
field = field->next;
while (field) {
printed += comma_fprintf(fp, &first, "%s", field->name);
field = field->next;
}
}
#endif
out:
fputc('\n', fp);
return ++printed;
}
#ifndef PYTHON_PERF
int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
unsigned int print_opts, struct callchain_cursor *cursor,
struct strlist *bt_stop_list, FILE *fp)
{
int printed = 0;
struct callchain_cursor_node *node;
int print_ip = print_opts & EVSEL__PRINT_IP;
int print_sym = print_opts & EVSEL__PRINT_SYM;
int print_dso = print_opts & EVSEL__PRINT_DSO;
int print_dsoff = print_opts & EVSEL__PRINT_DSOFF;
int print_symoffset = print_opts & EVSEL__PRINT_SYMOFFSET;
int print_oneline = print_opts & EVSEL__PRINT_ONELINE;
int print_srcline = print_opts & EVSEL__PRINT_SRCLINE;
int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR;
int print_arrow = print_opts & EVSEL__PRINT_CALLCHAIN_ARROW;
int print_skip_ignored = print_opts & EVSEL__PRINT_SKIP_IGNORED;
char s = print_oneline ? ' ' : '\t';
bool first = true;
if (cursor == NULL)
return fprintf(fp, "<not enough memory for the callchain cursor>%s", print_oneline ? "" : "\n");
if (sample->callchain) {
callchain_cursor_commit(cursor);
while (1) {
struct symbol *sym;
struct map *map;
u64 addr = 0;
node = callchain_cursor_current(cursor);
if (!node)
break;
sym = node->ms.sym;
map = node->ms.map;
if (sym && sym->ignore && print_skip_ignored)
goto next;
printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " ");
if (print_arrow && !first)
printed += fprintf(fp, " <-");
if (map)
addr = map__map_ip(map, node->ip);
if (print_ip)
printed += fprintf(fp, "%c%16" PRIx64, s, node->ip);
if (print_sym) {
struct addr_location node_al;
addr_location__init(&node_al);
printed += fprintf(fp, " ");
node_al.addr = addr;
node_al.map = map__get(map);
if (print_symoffset) {
printed += __symbol__fprintf_symname_offs(sym, &node_al,
print_unknown_as_addr,
true, fp);
} else {
printed += __symbol__fprintf_symname(sym, &node_al,
print_unknown_as_addr, fp);
}
addr_location__exit(&node_al);
}
if (print_dso && (!sym || !sym->inlined))
printed += map__fprintf_dsoname_dsoff(map, print_dsoff, addr, fp);
if (print_srcline)
printed += map__fprintf_srcline(map, addr, "\n ", fp);
if (sym && sym->inlined)
printed += fprintf(fp, " (inlined)");
if (!print_oneline)
printed += fprintf(fp, "\n");
/* Add srccode here too? */
if (bt_stop_list && sym &&
strlist__has_entry(bt_stop_list, sym->name)) {
break;
}
first = false;
next:
callchain_cursor_advance(cursor);
}
}
return printed;
}
int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
int left_alignment, unsigned int print_opts,
struct callchain_cursor *cursor, struct strlist *bt_stop_list, FILE *fp)
{
int printed = 0;
int print_ip = print_opts & EVSEL__PRINT_IP;
int print_sym = print_opts & EVSEL__PRINT_SYM;
int print_dso = print_opts & EVSEL__PRINT_DSO;
int print_dsoff = print_opts & EVSEL__PRINT_DSOFF;
int print_symoffset = print_opts & EVSEL__PRINT_SYMOFFSET;
int print_srcline = print_opts & EVSEL__PRINT_SRCLINE;
int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR;
if (cursor != NULL) {
printed += sample__fprintf_callchain(sample, left_alignment, print_opts,
cursor, bt_stop_list, fp);
} else {
printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " ");
if (print_ip)
printed += fprintf(fp, "%16" PRIx64, sample->ip);
if (print_sym) {
printed += fprintf(fp, " ");
if (print_symoffset) {
printed += __symbol__fprintf_symname_offs(al->sym, al,
print_unknown_as_addr,
true, fp);
} else {
printed += __symbol__fprintf_symname(al->sym, al,
print_unknown_as_addr, fp);
}
}
if (print_dso)
printed += map__fprintf_dsoname_dsoff(al->map, print_dsoff, al->addr, fp);
if (print_srcline)
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
}
return printed;
}
#endif /* PYTHON_PERF */
| linux-master | tools/perf/util/evsel_fprintf.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"
static void maps__init(struct maps *maps, struct machine *machine)
{
refcount_set(maps__refcnt(maps), 1);
init_rwsem(maps__lock(maps));
RC_CHK_ACCESS(maps)->entries = RB_ROOT;
RC_CHK_ACCESS(maps)->machine = machine;
RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
RC_CHK_ACCESS(maps)->nr_maps = 0;
RC_CHK_ACCESS(maps)->maps_by_name = NULL;
}
static void __maps__free_maps_by_name(struct maps *maps)
{
/*
* Free everything so that the next search by name falls back to the rbtree
*/
for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
map__put(maps__maps_by_name(maps)[i]);
zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
}
static int __maps__insert(struct maps *maps, struct map *map)
{
struct rb_node **p = &maps__entries(maps)->rb_node;
struct rb_node *parent = NULL;
const u64 ip = map__start(map);
struct map_rb_node *m, *new_rb_node;
new_rb_node = malloc(sizeof(*new_rb_node));
if (!new_rb_node)
return -ENOMEM;
RB_CLEAR_NODE(&new_rb_node->rb_node);
new_rb_node->map = map__get(map);
while (*p != NULL) {
parent = *p;
m = rb_entry(parent, struct map_rb_node, rb_node);
if (ip < map__start(m->map))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&new_rb_node->rb_node, parent, p);
rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
return 0;
}
int maps__insert(struct maps *maps, struct map *map)
{
int err;
const struct dso *dso = map__dso(map);
down_write(maps__lock(maps));
err = __maps__insert(maps, map);
if (err)
goto out;
++RC_CHK_ACCESS(maps)->nr_maps;
if (dso && dso->kernel) {
struct kmap *kmap = map__kmap(map);
if (kmap)
kmap->kmaps = maps;
else
pr_err("Internal error: kernel dso with non kernel map\n");
}
/*
* If we already performed some search by name, then we need to add the just
* inserted map and resort.
*/
if (maps__maps_by_name(maps)) {
if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) {
int nr_allocate = maps__nr_maps(maps) * 2;
struct map **maps_by_name = realloc(maps__maps_by_name(maps),
nr_allocate * sizeof(map));
if (maps_by_name == NULL) {
__maps__free_maps_by_name(maps);
err = -ENOMEM;
goto out;
}
RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
}
maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map);
__maps__sort_by_name(maps);
}
out:
up_write(maps__lock(maps));
return err;
}
static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
{
rb_erase_init(&rb_node->rb_node, maps__entries(maps));
map__put(rb_node->map);
free(rb_node);
}
void maps__remove(struct maps *maps, struct map *map)
{
struct map_rb_node *rb_node;
down_write(maps__lock(maps));
if (RC_CHK_ACCESS(maps)->last_search_by_name == map)
RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
rb_node = maps__find_node(maps, map);
assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map));
__maps__remove(maps, rb_node);
if (maps__maps_by_name(maps))
__maps__free_maps_by_name(maps);
--RC_CHK_ACCESS(maps)->nr_maps;
up_write(maps__lock(maps));
}
static void __maps__purge(struct maps *maps)
{
struct map_rb_node *pos, *next;
if (maps__maps_by_name(maps))
__maps__free_maps_by_name(maps);
maps__for_each_entry_safe(maps, pos, next) {
rb_erase_init(&pos->rb_node, maps__entries(maps));
map__put(pos->map);
free(pos);
}
}
static void maps__exit(struct maps *maps)
{
down_write(maps__lock(maps));
__maps__purge(maps);
up_write(maps__lock(maps));
}
bool maps__empty(struct maps *maps)
{
return !maps__first(maps);
}
struct maps *maps__new(struct machine *machine)
{
struct maps *result;
RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));
if (ADD_RC_CHK(result, maps))
maps__init(result, machine);
return result;
}
static void maps__delete(struct maps *maps)
{
maps__exit(maps);
unwind__finish_access(maps);
RC_CHK_FREE(maps);
}
struct maps *maps__get(struct maps *maps)
{
struct maps *result;
if (RC_CHK_GET(result, maps))
refcount_inc(maps__refcnt(maps));
return result;
}
void maps__put(struct maps *maps)
{
if (maps && refcount_dec_and_test(maps__refcnt(maps)))
maps__delete(maps);
else
RC_CHK_PUT(maps);
}
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
struct map *map = maps__find(maps, addr);
/* Ensure map is loaded before using map->map_ip */
if (map != NULL && map__load(map) >= 0) {
if (mapp != NULL)
*mapp = map;
return map__find_symbol(map, map__map_ip(map, addr));
}
return NULL;
}
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
struct symbol *sym;
struct map_rb_node *pos;
down_read(maps__lock(maps));
maps__for_each_entry(maps, pos) {
sym = map__find_symbol_by_name(pos->map, name);
if (sym == NULL)
continue;
if (!map__contains_symbol(pos->map, sym)) {
sym = NULL;
continue;
}
if (mapp != NULL)
*mapp = pos->map;
goto out;
}
sym = NULL;
out:
up_read(maps__lock(maps));
return sym;
}
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
if (maps == NULL)
return -1;
ams->ms.map = maps__find(maps, ams->addr);
if (ams->ms.map == NULL)
return -1;
}
ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);
return ams->ms.sym ? 0 : -1;
}
size_t maps__fprintf(struct maps *maps, FILE *fp)
{
size_t printed = 0;
struct map_rb_node *pos;
down_read(maps__lock(maps));
maps__for_each_entry(maps, pos) {
printed += fprintf(fp, "Map:");
printed += map__fprintf(pos->map, fp);
if (verbose > 2) {
printed += dso__fprintf(map__dso(pos->map), fp);
printed += fprintf(fp, "--\n");
}
}
up_read(maps__lock(maps));
return printed;
}
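/*
* Remove already inserted maps that overlap the incoming 'map'. Regions of an
* old map extending before map->start or past map->end are re-inserted as
* cloned maps; the overlapped middle part is dropped.
*/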
int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
struct rb_root *root;
struct rb_node *next, *first;
int err = 0;
down_write(maps__lock(maps));
root = maps__entries(maps);
/*
* Find first map where end > map->start.
* Same as find_vma() in kernel.
*/
next = root->rb_node;
first = NULL;
while (next) {
struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
if (map__end(pos->map) > map__start(map)) {
first = next;
if (map__start(pos->map) <= map__start(map))
break;
next = next->rb_left;
} else
next = next->rb_right;
}
next = first;
while (next && !err) {
struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
next = rb_next(&pos->rb_node);
/*
* Stop if current map starts after map->end.
* Maps are ordered by start: next will not overlap for sure.
*/
if (map__start(pos->map) >= map__end(map))
break;
if (verbose >= 2) {
if (use_browser) {
pr_debug("overlapping maps in %s (disable tui for more info)\n",
map__dso(map)->name);
} else {
fputs("overlapping maps:\n", fp);
map__fprintf(map, fp);
map__fprintf(pos->map, fp);
}
}
rb_erase_init(&pos->rb_node, root);
/*
* Now check if we need to create new maps for areas not
* overlapped by the new map:
*/
if (map__start(map) > map__start(pos->map)) {
struct map *before = map__clone(pos->map);
if (before == NULL) {
err = -ENOMEM;
goto put_map;
}
map__set_end(before, map__start(map));
err = __maps__insert(maps, before);
if (err) {
map__put(before);
goto put_map;
}
if (verbose >= 2 && !use_browser)
map__fprintf(before, fp);
map__put(before);
}
if (map__end(map) < map__end(pos->map)) {
struct map *after = map__clone(pos->map);
if (after == NULL) {
err = -ENOMEM;
goto put_map;
}
map__set_start(after, map__end(map));
map__add_pgoff(after, map__end(map) - map__start(pos->map));
assert(map__map_ip(pos->map, map__end(map)) ==
map__map_ip(after, map__end(map)));
err = __maps__insert(maps, after);
if (err) {
map__put(after);
goto put_map;
}
if (verbose >= 2 && !use_browser)
map__fprintf(after, fp);
map__put(after);
}
put_map:
map__put(pos->map);
free(pos);
}
up_write(maps__lock(maps));
return err;
}
/*
* XXX This should not really _copy_ the maps, but refcount them.
*/
int maps__clone(struct thread *thread, struct maps *parent)
{
struct maps *maps = thread__maps(thread);
int err;
struct map_rb_node *rb_node;
down_read(maps__lock(parent));
maps__for_each_entry(parent, rb_node) {
struct map *new = map__clone(rb_node->map);
if (new == NULL) {
err = -ENOMEM;
goto out_unlock;
}
err = unwind__prepare_access(maps, new, NULL);
if (err)
goto out_unlock;
err = maps__insert(maps, new);
if (err)
goto out_unlock;
map__put(new);
}
err = 0;
out_unlock:
up_read(maps__lock(parent));
return err;
}
struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
{
struct map_rb_node *rb_node;
maps__for_each_entry(maps, rb_node) {
if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
return rb_node;
}
return NULL;
}
struct map *maps__find(struct maps *maps, u64 ip)
{
struct rb_node *p;
struct map_rb_node *m;
down_read(maps__lock(maps));
p = maps__entries(maps)->rb_node;
while (p != NULL) {
m = rb_entry(p, struct map_rb_node, rb_node);
if (ip < map__start(m->map))
p = p->rb_left;
else if (ip >= map__end(m->map))
p = p->rb_right;
else
goto out;
}
m = NULL;
out:
up_read(maps__lock(maps));
return m ? m->map : NULL;
}
struct map_rb_node *maps__first(struct maps *maps)
{
struct rb_node *first = rb_first(maps__entries(maps));
if (first)
return rb_entry(first, struct map_rb_node, rb_node);
return NULL;
}
struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
{
struct rb_node *next;
if (!node)
return NULL;
next = rb_next(&node->rb_node);
if (!next)
return NULL;
return rb_entry(next, struct map_rb_node, rb_node);
}
| linux-master | tools/perf/util/maps.c |
#include <stdio.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <linux/err.h>
#include "util/ftrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"
#include "util/bpf_skel/func_latency.skel.h"
static struct func_latency_bpf *skel;
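/*
* Open and load the func_latency BPF skeleton, size and fill the optional
* cpu/task filter maps from the ftrace target, and attach kprobe/kretprobe
* programs to the single traced function. Returns a dummy fd (only used for
* poll()) on success or a negative error.
*/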
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
int fd, err;
int i, ncpus = 1, ntasks = 1;
struct filter_entry *func;
if (!list_is_singular(&ftrace->filters)) {
pr_err("ERROR: %s target function(s).\n",
list_empty(&ftrace->filters) ? "No" : "Too many");
return -1;
}
func = list_first_entry(&ftrace->filters, struct filter_entry, list);
skel = func_latency_bpf__open();
if (!skel) {
pr_err("Failed to open func latency skeleton\n");
return -1;
}
/* don't need to set cpu filter for system-wide mode */
if (ftrace->target.cpu_list) {
ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
}
if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
}
set_max_rlimit();
err = func_latency_bpf__load(skel);
if (err) {
pr_err("Failed to load func latency skeleton\n");
goto out;
}
if (ftrace->target.cpu_list) {
u32 cpu;
u8 val = 1;
skel->bss->has_cpu = 1;
fd = bpf_map__fd(skel->maps.cpu_filter);
for (i = 0; i < ncpus; i++) {
cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
}
}
if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
u32 pid;
u8 val = 1;
skel->bss->has_task = 1;
fd = bpf_map__fd(skel->maps.task_filter);
for (i = 0; i < ntasks; i++) {
pid = perf_thread_map__pid(ftrace->evlist->core.threads, i);
bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
}
}
skel->bss->use_nsec = ftrace->use_nsec;
skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
false, func->name);
if (IS_ERR(skel->links.func_begin)) {
pr_err("Failed to attach fentry program\n");
err = PTR_ERR(skel->links.func_begin);
goto out;
}
skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
true, func->name);
if (IS_ERR(skel->links.func_end)) {
pr_err("Failed to attach fexit program\n");
err = PTR_ERR(skel->links.func_end);
goto out;
}
/* XXX: we don't actually use this fd - just for poll() */
return open("/dev/null", O_RDONLY);
out:
return err;
}
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
skel->bss->enabled = 1;
return 0;
}
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
skel->bss->enabled = 0;
return 0;
}
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
int buckets[])
{
int i, fd, err;
u32 idx;
u64 *hist;
int ncpus = cpu__max_cpu().cpu;
fd = bpf_map__fd(skel->maps.latency);
hist = calloc(ncpus, sizeof(*hist));
if (hist == NULL)
return -ENOMEM;
for (idx = 0; idx < NUM_BUCKET; idx++) {
err = bpf_map_lookup_elem(fd, &idx, hist);
if (err) {
buckets[idx] = 0;
continue;
}
for (i = 0; i < ncpus; i++)
buckets[idx] += hist[i];
}
free(hist);
return 0;
}
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
func_latency_bpf__destroy(skel);
return 0;
}
| linux-master | tools/perf/util/bpf_ftrace.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "util/debug.h"
#include <subcmd/parse-options.h>
#include "util/perf_regs.h"
#include "util/parse-regs-options.h"
static int
__parse_regs(const struct option *opt, const char *str, int unset, bool intr)
{
uint64_t *mode = (uint64_t *)opt->value;
const struct sample_reg *r = NULL;
char *s, *os = NULL, *p;
int ret = -1;
uint64_t mask;
if (unset)
return 0;
/*
* cannot set it twice
*/
if (*mode)
return -1;
if (intr)
mask = arch__intr_reg_mask();
else
mask = arch__user_reg_mask();
/* str may be NULL in case no arg is passed to -I */
if (str) {
/* because str is read-only */
s = os = strdup(str);
if (!s)
return -1;
for (;;) {
p = strchr(s, ',');
if (p)
*p = '\0';
if (!strcmp(s, "?")) {
fprintf(stderr, "available registers: ");
#ifdef HAVE_PERF_REGS_SUPPORT
for (r = sample_reg_masks; r->name; r++) {
if (r->mask & mask)
fprintf(stderr, "%s ", r->name);
}
#endif
fputc('\n', stderr);
/* just printing available regs */
goto error;
}
#ifdef HAVE_PERF_REGS_SUPPORT
for (r = sample_reg_masks; r->name; r++) {
if ((r->mask & mask) && !strcasecmp(s, r->name))
break;
}
#endif
if (!r || !r->name) {
ui__warning("Unknown register \"%s\", check man page or run \"perf record %s?\"\n",
s, intr ? "-I" : "--user-regs=");
goto error;
}
*mode |= r->mask;
if (!p)
break;
s = p + 1;
}
}
ret = 0;
/* default to all possible regs */
if (*mode == 0)
*mode = mask;
error:
free(os);
return ret;
}
int
parse_user_regs(const struct option *opt, const char *str, int unset)
{
return __parse_regs(opt, str, unset, false);
}
int
parse_intr_regs(const struct option *opt, const char *str, int unset)
{
return __parse_regs(opt, str, unset, true);
}
| linux-master | tools/perf/util/parse-regs-options.c |
// SPDX-License-Identifier: GPL-2.0
#include "mutex.h"
#include "debug.h"
#include <linux/string.h>
#include <errno.h>
static void check_err(const char *fn, int err)
{
char sbuf[STRERR_BUFSIZE];
if (err == 0)
return;
pr_err("%s error: '%s'\n", fn, str_error_r(err, sbuf, sizeof(sbuf)));
}
#define CHECK_ERR(err) check_err(__func__, err)
static void __mutex_init(struct mutex *mtx, bool pshared)
{
pthread_mutexattr_t attr;
CHECK_ERR(pthread_mutexattr_init(&attr));
#ifndef NDEBUG
/* In normal builds enable error checking, such as recursive usage. */
CHECK_ERR(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
#endif
if (pshared)
CHECK_ERR(pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
CHECK_ERR(pthread_mutex_init(&mtx->lock, &attr));
CHECK_ERR(pthread_mutexattr_destroy(&attr));
}
void mutex_init(struct mutex *mtx)
{
__mutex_init(mtx, /*pshared=*/false);
}
void mutex_init_pshared(struct mutex *mtx)
{
__mutex_init(mtx, /*pshared=*/true);
}
void mutex_destroy(struct mutex *mtx)
{
CHECK_ERR(pthread_mutex_destroy(&mtx->lock));
}
void mutex_lock(struct mutex *mtx)
NO_THREAD_SAFETY_ANALYSIS
{
CHECK_ERR(pthread_mutex_lock(&mtx->lock));
}
void mutex_unlock(struct mutex *mtx)
NO_THREAD_SAFETY_ANALYSIS
{
CHECK_ERR(pthread_mutex_unlock(&mtx->lock));
}
bool mutex_trylock(struct mutex *mtx)
{
int ret = pthread_mutex_trylock(&mtx->lock);
if (ret == 0)
return true; /* Lock acquired. */
if (ret == EBUSY)
return false; /* Lock busy. */
/* Print error. */
CHECK_ERR(ret);
return false;
}
static void __cond_init(struct cond *cnd, bool pshared)
{
pthread_condattr_t attr;
CHECK_ERR(pthread_condattr_init(&attr));
if (pshared)
CHECK_ERR(pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
CHECK_ERR(pthread_cond_init(&cnd->cond, &attr));
CHECK_ERR(pthread_condattr_destroy(&attr));
}
void cond_init(struct cond *cnd)
{
__cond_init(cnd, /*pshared=*/false);
}
void cond_init_pshared(struct cond *cnd)
{
__cond_init(cnd, /*pshared=*/true);
}
void cond_destroy(struct cond *cnd)
{
CHECK_ERR(pthread_cond_destroy(&cnd->cond));
}
void cond_wait(struct cond *cnd, struct mutex *mtx)
{
CHECK_ERR(pthread_cond_wait(&cnd->cond, &mtx->lock));
}
void cond_signal(struct cond *cnd)
{
CHECK_ERR(pthread_cond_signal(&cnd->cond));
}
void cond_broadcast(struct cond *cnd)
{
CHECK_ERR(pthread_cond_broadcast(&cnd->cond));
}
| linux-master | tools/perf/util/mutex.c |
// SPDX-License-Identifier: GPL-2.0
#include "levenshtein.h"
#include <errno.h>
#include <stdlib.h>
#include <string.h>
/*
* This function implements the Damerau-Levenshtein algorithm to
* calculate a distance between strings.
*
* Basically, it says how many letters need to be swapped, substituted,
* deleted from, or added to string1, at least, to get string2.
*
* The idea is to build a distance matrix for the substrings of both
* strings. To avoid a large space complexity, only the last three rows
* are kept in memory (if swaps had the same or a higher cost than one
* deletion plus one insertion, only two rows would be needed).
*
* At any stage, "i + 1" denotes the length of the current substring of
* string1 that the distance is calculated for.
*
* row2 holds the current row, row1 the previous row (i.e. for the substring
* of string1 of length "i"), and row0 the row before that.
*
* In other words, at the start of the big loop, row2[j + 1] contains the
* Damerau-Levenshtein distance between the substring of string1 of length
* "i" and the substring of string2 of length "j + 1".
*
* All the big loop does is determine the partial minimum-cost paths.
*
* It does so by calculating the costs of the path ending in characters
* i (in string1) and j (in string2), respectively, given that the last
* operation is a substitution, a swap, a deletion, or an insertion.
*
* This implementation allows the costs to be weighted:
*
* - w (as in "sWap")
* - s (as in "Substitution")
* - a (for insertion, AKA "Add")
* - d (as in "Deletion")
*
* Note that this algorithm calculates a distance _iff_ d == a.
*/
int levenshtein(const char *string1, const char *string2,
int w, int s, int a, int d)
{
int len1 = strlen(string1), len2 = strlen(string2);
int *row0 = malloc(sizeof(int) * (len2 + 1));
int *row1 = malloc(sizeof(int) * (len2 + 1));
int *row2 = malloc(sizeof(int) * (len2 + 1));
int i, j;
for (j = 0; j <= len2; j++)
row1[j] = j * a;
for (i = 0; i < len1; i++) {
int *dummy;
row2[0] = (i + 1) * d;
for (j = 0; j < len2; j++) {
/* substitution */
row2[j + 1] = row1[j] + s * (string1[i] != string2[j]);
/* swap */
if (i > 0 && j > 0 && string1[i - 1] == string2[j] &&
string1[i] == string2[j - 1] &&
row2[j + 1] > row0[j - 1] + w)
row2[j + 1] = row0[j - 1] + w;
/* deletion */
if (row2[j + 1] > row1[j + 1] + d)
row2[j + 1] = row1[j + 1] + d;
/* insertion */
if (row2[j + 1] > row2[j] + a)
row2[j + 1] = row2[j] + a;
}
dummy = row0;
row0 = row1;
row1 = row2;
row2 = dummy;
}
i = row1[len2];
free(row0);
free(row1);
free(row2);
return i;
}
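/*
* Illustrative examples (added for clarity, unit costs w = s = a = d = 1):
*
* levenshtein("kitten", "sitting", 1, 1, 1, 1) == 3
* levenshtein("abcd", "acbd", 1, 1, 1, 1) == 1 (one adjacent swap)
*/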
| linux-master | tools/perf/util/levenshtein.c |
// SPDX-License-Identifier: GPL-2.0
#include "util/copyfile.h"
#include "util/namespaces.h"
#include <internal/lib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
static int slow_copyfile(const char *from, const char *to, struct nsinfo *nsi)
{
int err = -1;
char *line = NULL;
size_t n;
FILE *from_fp, *to_fp;
struct nscookie nsc;
nsinfo__mountns_enter(nsi, &nsc);
from_fp = fopen(from, "r");
nsinfo__mountns_exit(&nsc);
if (from_fp == NULL)
goto out;
to_fp = fopen(to, "w");
if (to_fp == NULL)
goto out_fclose_from;
while (getline(&line, &n, from_fp) > 0)
if (fputs(line, to_fp) == EOF)
goto out_fclose_to;
err = 0;
out_fclose_to:
fclose(to_fp);
free(line);
out_fclose_from:
fclose(from_fp);
out:
return err;
}
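/*
* Copy 'size' bytes from 'ifd' at 'off_in' to 'ofd' at 'off_out' by mmap()ing
* the source range read-only and pwrite()ing it to the destination. Returns 0
* when everything was written, -1 otherwise.
*/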
int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
{
void *ptr;
loff_t pgoff;
pgoff = off_in & ~(page_size - 1);
off_in -= pgoff;
ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff);
if (ptr == MAP_FAILED)
return -1;
while (size) {
ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out);
if (ret < 0 && errno == EINTR)
continue;
if (ret <= 0)
break;
size -= ret;
off_in += ret;
off_out += ret;
}
munmap(ptr, off_in + size);
return size ? -1 : 0;
}
static int copyfile_mode_ns(const char *from, const char *to, mode_t mode,
struct nsinfo *nsi)
{
int fromfd, tofd;
struct stat st;
int err;
char *tmp = NULL, *ptr = NULL;
struct nscookie nsc;
nsinfo__mountns_enter(nsi, &nsc);
err = stat(from, &st);
nsinfo__mountns_exit(&nsc);
if (err)
goto out;
err = -1;
/* extra 'x' at the end is to reserve space for '.' */
if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) {
tmp = NULL;
goto out;
}
ptr = strrchr(tmp, '/');
if (!ptr)
goto out;
ptr = memmove(ptr + 1, ptr, strlen(ptr) - 1);
*ptr = '.';
tofd = mkstemp(tmp);
if (tofd < 0)
goto out;
if (st.st_size == 0) { /* /proc? do it slowly... */
err = slow_copyfile(from, tmp, nsi);
if (!err && fchmod(tofd, mode))
err = -1;
goto out_close_to;
}
if (fchmod(tofd, mode))
goto out_close_to;
nsinfo__mountns_enter(nsi, &nsc);
fromfd = open(from, O_RDONLY);
nsinfo__mountns_exit(&nsc);
if (fromfd < 0)
goto out_close_to;
err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size);
close(fromfd);
out_close_to:
close(tofd);
if (!err)
err = link(tmp, to);
unlink(tmp);
out:
free(tmp);
return err;
}
int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi)
{
return copyfile_mode_ns(from, to, 0755, nsi);
}
int copyfile_mode(const char *from, const char *to, mode_t mode)
{
return copyfile_mode_ns(from, to, mode, NULL);
}
int copyfile(const char *from, const char *to)
{
return copyfile_mode(from, to, 0755);
}
| linux-master | tools/perf/util/copyfile.c |
// SPDX-License-Identifier: GPL-2.0
#include <asm/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/libbpf.h>
#include "bpf-event.h"
#include "bpf-utils.h"
#endif
#include "compress.h"
#include "env.h"
#include "namespaces.h"
#include "path.h"
#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
#include "dsos.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h" /* O_CLOEXEC for older systems */
#include "debug.h"
#include "string2.h"
#include "vdso.h"
static const char * const debuglink_paths[] = {
"%.0s%s",
"%s/%s",
"%s/.debug/%s",
"/usr/lib/debug%s/%s"
};
char dso__symtab_origin(const struct dso *dso)
{
static const char origin[] = {
[DSO_BINARY_TYPE__KALLSYMS] = 'k',
[DSO_BINARY_TYPE__VMLINUX] = 'v',
[DSO_BINARY_TYPE__JAVA_JIT] = 'j',
[DSO_BINARY_TYPE__DEBUGLINK] = 'l',
[DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
[DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x',
[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
[DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
[DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
[DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
[DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
};
if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
return '!';
return origin[dso->symtab_type];
}
bool dso__is_object_file(const struct dso *dso)
{
switch (dso->binary_type) {
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__JAVA_JIT:
case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__BPF_IMAGE:
case DSO_BINARY_TYPE__OOL:
return false;
case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__DEBUGLINK:
case DSO_BINARY_TYPE__BUILD_ID_CACHE:
case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
case DSO_BINARY_TYPE__GUEST_KMODULE:
case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
case DSO_BINARY_TYPE__KCORE:
case DSO_BINARY_TYPE__GUEST_KCORE:
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
case DSO_BINARY_TYPE__NOT_FOUND:
default:
return true;
}
}
int dso__read_binary_type_filename(const struct dso *dso,
enum dso_binary_type type,
char *root_dir, char *filename, size_t size)
{
char build_id_hex[SBUILD_ID_SIZE];
int ret = 0;
size_t len;
switch (type) {
case DSO_BINARY_TYPE__DEBUGLINK:
{
const char *last_slash;
char dso_dir[PATH_MAX];
char symfile[PATH_MAX];
unsigned int i;
len = __symbol__join_symfs(filename, size, dso->long_name);
last_slash = filename + len;
while (last_slash != filename && *last_slash != '/')
last_slash--;
strncpy(dso_dir, filename, last_slash - filename);
dso_dir[last_slash-filename] = '\0';
if (!is_regular_file(filename)) {
ret = -1;
break;
}
ret = filename__read_debuglink(filename, symfile, PATH_MAX);
if (ret)
break;
/* Check predefined locations where debug file might reside */
ret = -1;
for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
snprintf(filename, size,
debuglink_paths[i], dso_dir, symfile);
if (is_regular_file(filename)) {
ret = 0;
break;
}
}
break;
}
case DSO_BINARY_TYPE__BUILD_ID_CACHE:
if (dso__build_id_filename(dso, filename, size, false) == NULL)
ret = -1;
break;
case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
if (dso__build_id_filename(dso, filename, size, true) == NULL)
ret = -1;
break;
case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
snprintf(filename + len, size - len, "%s.debug", dso->long_name);
break;
case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
snprintf(filename + len, size - len, "%s", dso->long_name);
break;
case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
/*
* Ubuntu can mixup /usr/lib with /lib, putting debuginfo in
* /usr/lib/debug/lib when it is expected to be in
* /usr/lib/debug/usr/lib
*/
if (strlen(dso->long_name) < 9 ||
strncmp(dso->long_name, "/usr/lib/", 9)) {
ret = -1;
break;
}
len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
snprintf(filename + len, size - len, "%s", dso->long_name + 4);
break;
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
{
const char *last_slash;
size_t dir_size;
last_slash = dso->long_name + dso->long_name_len;
while (last_slash != dso->long_name && *last_slash != '/')
last_slash--;
len = __symbol__join_symfs(filename, size, "");
dir_size = last_slash - dso->long_name + 2;
if (dir_size > (size - len)) {
ret = -1;
break;
}
len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
len += scnprintf(filename + len , size - len, ".debug%s",
last_slash);
break;
}
case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
if (!dso->has_build_id) {
ret = -1;
break;
}
build_id__sprintf(&dso->bid, build_id_hex);
len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
snprintf(filename + len, size - len, "%.2s/%s.debug",
build_id_hex, build_id_hex + 2);
break;
case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
__symbol__join_symfs(filename, size, dso->long_name);
break;
case DSO_BINARY_TYPE__GUEST_KMODULE:
case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
path__join3(filename, size, symbol_conf.symfs,
root_dir, dso->long_name);
break;
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
__symbol__join_symfs(filename, size, dso->long_name);
break;
case DSO_BINARY_TYPE__KCORE:
case DSO_BINARY_TYPE__GUEST_KCORE:
snprintf(filename, size, "%s", dso->long_name);
break;
default:
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__JAVA_JIT:
case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__BPF_IMAGE:
case DSO_BINARY_TYPE__OOL:
case DSO_BINARY_TYPE__NOT_FOUND:
ret = -1;
break;
}
return ret;
}
enum {
COMP_ID__NONE = 0,
};
static const struct {
const char *fmt;
int (*decompress)(const char *input, int output);
bool (*is_compressed)(const char *input);
} compressions[] = {
[COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
{ "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
{ "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
{ NULL, NULL, NULL },
};
static int is_supported_compression(const char *ext)
{
unsigned i;
for (i = 1; compressions[i].fmt; i++) {
if (!strcmp(ext, compressions[i].fmt))
return i;
}
return COMP_ID__NONE;
}
bool is_kernel_module(const char *pathname, int cpumode)
{
struct kmod_path m;
int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
WARN_ONCE(mode != cpumode,
"Internal error: passing unmasked cpumode (%x) to is_kernel_module",
cpumode);
switch (mode) {
case PERF_RECORD_MISC_USER:
case PERF_RECORD_MISC_HYPERVISOR:
case PERF_RECORD_MISC_GUEST_USER:
return false;
/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
default:
if (kmod_path__parse(&m, pathname)) {
pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
pathname);
return true;
}
}
return m.kmod;
}
bool dso__needs_decompress(struct dso *dso)
{
return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}
int filename__decompress(const char *name, char *pathname,
size_t len, int comp, int *err)
{
char tmpbuf[] = KMOD_DECOMP_NAME;
int fd = -1;
/*
* We have a proper compression id for the DSO and yet the file
* behind the 'name' can still be a plain uncompressed object.
*
* The reason lies in the way we open DSO object files:
* we try all possible 'debug' objects until we find the
* data. So even if the DSO is represented by a 'krava.xz' module,
* we can end up here opening a '~/.debug/....23432432/debug' file
* which is not compressed.
*
* To keep this transparent, we detect this and return the file
* descriptor to the uncompressed file.
*/
if (!compressions[comp].is_compressed(name))
return open(name, O_RDONLY);
fd = mkstemp(tmpbuf);
if (fd < 0) {
*err = errno;
return -1;
}
if (compressions[comp].decompress(name, fd)) {
*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
close(fd);
fd = -1;
}
if (!pathname || (fd < 0))
unlink(tmpbuf);
if (pathname && (fd >= 0))
strlcpy(pathname, tmpbuf, len);
return fd;
}
static int decompress_kmodule(struct dso *dso, const char *name,
char *pathname, size_t len)
{
if (!dso__needs_decompress(dso))
return -1;
if (dso->comp == COMP_ID__NONE)
return -1;
return filename__decompress(name, pathname, len, dso->comp,
&dso->load_errno);
}
int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
return decompress_kmodule(dso, name, NULL, 0);
}
int dso__decompress_kmodule_path(struct dso *dso, const char *name,
char *pathname, size_t len)
{
int fd = decompress_kmodule(dso, name, pathname, len);
close(fd);
return fd >= 0 ? 0 : -1;
}
/*
* Parses kernel module specified in @path and updates
* @m argument like:
*
* @comp - compression id if @path ends in a supported compression
* suffix, COMP_ID__NONE otherwise
* @kmod - true if @path contains a '.ko' suffix in the right position,
* false otherwise
* @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed base
* name of the kernel module without suffixes, otherwise the strdup-ed
* base name of @path
*
* Returns 0 if there's no strdup error, -ENOMEM otherwise.
*/
int __kmod_path__parse(struct kmod_path *m, const char *path,
bool alloc_name)
{
const char *name = strrchr(path, '/');
const char *ext = strrchr(path, '.');
bool is_simple_name = false;
memset(m, 0x0, sizeof(*m));
name = name ? name + 1 : path;
/*
* '.' is also a valid character for module name. For example:
* [aaa.bbb] is a valid module name. '[' should have higher
* priority than '.ko' suffix.
*
* The kernel names come from machine__mmap_name. Such
* a name belongs to the kernel itself, not to a kernel module.
*/
if (name[0] == '[') {
is_simple_name = true;
if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
(strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
(strncmp(name, "[vdso]", 6) == 0) ||
(strncmp(name, "[vdso32]", 8) == 0) ||
(strncmp(name, "[vdsox32]", 9) == 0) ||
(strncmp(name, "[vsyscall]", 10) == 0)) {
m->kmod = false;
} else
m->kmod = true;
}
/* No extension, just return name. */
if ((ext == NULL) || is_simple_name) {
if (alloc_name) {
m->name = strdup(name);
return m->name ? 0 : -ENOMEM;
}
return 0;
}
m->comp = is_supported_compression(ext + 1);
if (m->comp > COMP_ID__NONE)
ext -= 3;
/* Check .ko extension only if there's enough name left. */
if (ext > name)
m->kmod = !strncmp(ext, ".ko", 3);
if (alloc_name) {
if (m->kmod) {
if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
return -ENOMEM;
} else {
if (asprintf(&m->name, "%s", name) == -1)
return -ENOMEM;
}
strreplace(m->name, '-', '_');
}
return 0;
}
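/*
* Example (assuming gzip support is built in, so "gz" is a known suffix):
* parsing "/lib/modules/.../fs/ext4/ext4.ko.gz" sets m->kmod = true, m->comp
* to the gzip id and, with @alloc_name, m->name = "[ext4]".
*/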
void dso__set_module_info(struct dso *dso, struct kmod_path *m,
struct machine *machine)
{
if (machine__is_host(machine))
dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
else
dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
/* _KMODULE_COMP should be next to _KMODULE */
if (m->kmod && m->comp) {
dso->symtab_type++;
dso->comp = m->comp;
}
dso__set_short_name(dso, strdup(m->name), true);
}
/*
* Global list of open DSOs and the counter.
*/
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
static void dso__list_add(struct dso *dso)
{
list_add_tail(&dso->data.open_entry, &dso__data_open);
dso__data_open_cnt++;
}
static void dso__list_del(struct dso *dso)
{
list_del_init(&dso->data.open_entry);
WARN_ONCE(dso__data_open_cnt <= 0,
"DSO data fd counter out of bounds.");
dso__data_open_cnt--;
}
static void close_first_dso(void);
static int do_open(char *name)
{
int fd;
char sbuf[STRERR_BUFSIZE];
do {
fd = open(name, O_RDONLY|O_CLOEXEC);
if (fd >= 0)
return fd;
pr_debug("dso open failed: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
if (!dso__data_open_cnt || errno != EMFILE)
break;
close_first_dso();
} while (1);
return -1;
}
char *dso__filename_with_chroot(const struct dso *dso, const char *filename)
{
return filename_with_chroot(nsinfo__pid(dso->nsinfo), filename);
}
static int __open_dso(struct dso *dso, struct machine *machine)
{
int fd = -EINVAL;
char *root_dir = (char *)"";
char *name = malloc(PATH_MAX);
bool decomp = false;
if (!name)
return -ENOMEM;
mutex_lock(&dso->lock);
if (machine)
root_dir = machine->root_dir;
if (dso__read_binary_type_filename(dso, dso->binary_type,
root_dir, name, PATH_MAX))
goto out;
if (!is_regular_file(name)) {
char *new_name;
if (errno != ENOENT || dso->nsinfo == NULL)
goto out;
new_name = dso__filename_with_chroot(dso, name);
if (!new_name)
goto out;
free(name);
name = new_name;
}
if (dso__needs_decompress(dso)) {
char newpath[KMOD_DECOMP_LEN];
size_t len = sizeof(newpath);
if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
fd = -dso->load_errno;
goto out;
}
decomp = true;
strcpy(name, newpath);
}
fd = do_open(name);
if (decomp)
unlink(name);
out:
mutex_unlock(&dso->lock);
free(name);
return fd;
}
static void check_data_close(void);
/**
* open_dso - Open DSO data file
* @dso: dso object
*
* Opens @dso's data file descriptor and updates the
* list/count of open DSO objects.
*/
static int open_dso(struct dso *dso, struct machine *machine)
{
int fd;
struct nscookie nsc;
if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
mutex_lock(&dso->lock);
nsinfo__mountns_enter(dso->nsinfo, &nsc);
mutex_unlock(&dso->lock);
}
fd = __open_dso(dso, machine);
if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
nsinfo__mountns_exit(&nsc);
if (fd >= 0) {
dso__list_add(dso);
/*
* Check if we crossed the allowed number
* of opened DSOs and close one if needed.
*/
check_data_close();
}
return fd;
}
static void close_data_fd(struct dso *dso)
{
if (dso->data.fd >= 0) {
close(dso->data.fd);
dso->data.fd = -1;
dso->data.file_size = 0;
dso__list_del(dso);
}
}
/**
* close_dso - Close DSO data file
* @dso: dso object
*
* Closes @dso's data file descriptor and updates the
* list/count of open DSO objects.
*/
static void close_dso(struct dso *dso)
{
close_data_fd(dso);
}
static void close_first_dso(void)
{
struct dso *dso;
dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
close_dso(dso);
}
static rlim_t get_fd_limit(void)
{
struct rlimit l;
rlim_t limit = 0;
/* Allow half of the current open fd limit. */
if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
if (l.rlim_cur == RLIM_INFINITY)
limit = l.rlim_cur;
else
limit = l.rlim_cur / 2;
} else {
pr_err("failed to get fd limit\n");
limit = 1;
}
return limit;
}
static rlim_t fd_limit;
/*
* Used only by tests/dso-data.c to reset the environment
* for tests. I don't expect we should change this during
* standard runtime.
*/
void reset_fd_limit(void)
{
fd_limit = 0;
}
static bool may_cache_fd(void)
{
if (!fd_limit)
fd_limit = get_fd_limit();
if (fd_limit == RLIM_INFINITY)
return true;
return fd_limit > (rlim_t) dso__data_open_cnt;
}
/*
* Check and close the LRU dso if we crossed the allowed limit
* of open dso file descriptors. The limit is half of the
* RLIMIT_NOFILE open-file limit.
*/
static void check_data_close(void)
{
bool cache_fd = may_cache_fd();
if (!cache_fd)
close_first_dso();
}
/**
* dso__data_close - Close DSO data file
* @dso: dso object
*
* External interface to close @dso's data file descriptor.
*/
void dso__data_close(struct dso *dso)
{
pthread_mutex_lock(&dso__data_open_lock);
close_dso(dso);
pthread_mutex_unlock(&dso__data_open_lock);
}
static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
enum dso_binary_type binary_type_data[] = {
DSO_BINARY_TYPE__BUILD_ID_CACHE,
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__NOT_FOUND,
};
int i = 0;
if (dso->data.fd >= 0)
return;
if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
dso->data.fd = open_dso(dso, machine);
goto out;
}
do {
dso->binary_type = binary_type_data[i++];
dso->data.fd = open_dso(dso, machine);
if (dso->data.fd >= 0)
goto out;
} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
if (dso->data.fd >= 0)
dso->data.status = DSO_DATA_STATUS_OK;
else
dso->data.status = DSO_DATA_STATUS_ERROR;
}
/**
* dso__data_get_fd - Get dso's data file descriptor
* @dso: dso object
* @machine: machine object
*
* External interface to find dso's file, open it and
* return its file descriptor. It should be paired with
* dso__data_put_fd() if it returns a non-negative value.
*/
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
if (pthread_mutex_lock(&dso__data_open_lock) < 0)
return -1;
try_to_open_dso(dso, machine);
if (dso->data.fd < 0)
pthread_mutex_unlock(&dso__data_open_lock);
return dso->data.fd;
}
void dso__data_put_fd(struct dso *dso __maybe_unused)
{
pthread_mutex_unlock(&dso__data_open_lock);
}
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
u32 flag = 1 << by;
if (dso->data.status_seen & flag)
return true;
dso->data.status_seen |= flag;
return false;
}
#ifdef HAVE_LIBBPF_SUPPORT
static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
struct bpf_prog_info_node *node;
ssize_t size = DSO__DATA_CACHE_SIZE;
u64 len;
u8 *buf;
node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
if (!node || !node->info_linear) {
dso->data.status = DSO_DATA_STATUS_ERROR;
return -1;
}
len = node->info_linear->info.jited_prog_len;
buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;
if (offset >= len)
return -1;
size = (ssize_t)min(len - offset, (u64)size);
memcpy(data, buf + offset, size);
return size;
}
static int bpf_size(struct dso *dso)
{
struct bpf_prog_info_node *node;
node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
if (!node || !node->info_linear) {
dso->data.status = DSO_DATA_STATUS_ERROR;
return -1;
}
dso->data.file_size = node->info_linear->info.jited_prog_len;
return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
static void
dso_cache__free(struct dso *dso)
{
struct rb_root *root = &dso->data.cache;
struct rb_node *next = rb_first(root);
mutex_lock(&dso->lock);
while (next) {
struct dso_cache *cache;
cache = rb_entry(next, struct dso_cache, rb_node);
next = rb_next(&cache->rb_node);
rb_erase(&cache->rb_node, root);
free(cache);
}
mutex_unlock(&dso->lock);
}
static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
const struct rb_root *root = &dso->data.cache;
struct rb_node * const *p = &root->rb_node;
const struct rb_node *parent = NULL;
struct dso_cache *cache;
while (*p != NULL) {
u64 end;
parent = *p;
cache = rb_entry(parent, struct dso_cache, rb_node);
end = cache->offset + DSO__DATA_CACHE_SIZE;
if (offset < cache->offset)
p = &(*p)->rb_left;
else if (offset >= end)
p = &(*p)->rb_right;
else
return cache;
}
return NULL;
}
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
struct rb_root *root = &dso->data.cache;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct dso_cache *cache;
u64 offset = new->offset;
mutex_lock(&dso->lock);
while (*p != NULL) {
u64 end;
parent = *p;
cache = rb_entry(parent, struct dso_cache, rb_node);
end = cache->offset + DSO__DATA_CACHE_SIZE;
if (offset < cache->offset)
p = &(*p)->rb_left;
else if (offset >= end)
p = &(*p)->rb_right;
else
goto out;
}
rb_link_node(&new->rb_node, parent, p);
rb_insert_color(&new->rb_node, root);
cache = NULL;
out:
mutex_unlock(&dso->lock);
return cache;
}
static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
u64 size, bool out)
{
u64 cache_offset = offset - cache->offset;
u64 cache_size = min(cache->size - cache_offset, size);
if (out)
memcpy(data, cache->data + cache_offset, cache_size);
else
memcpy(cache->data + cache_offset, data, cache_size);
return cache_size;
}
static ssize_t file_read(struct dso *dso, struct machine *machine,
u64 offset, char *data)
{
ssize_t ret;
pthread_mutex_lock(&dso__data_open_lock);
/*
	 * dso->data.fd might have been closed if another thread opened a
	 * different file (dso) due to the open file limit (RLIMIT_NOFILE).
*/
try_to_open_dso(dso, machine);
if (dso->data.fd < 0) {
dso->data.status = DSO_DATA_STATUS_ERROR;
ret = -errno;
goto out;
}
ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
pthread_mutex_unlock(&dso__data_open_lock);
return ret;
}
static struct dso_cache *dso_cache__populate(struct dso *dso,
struct machine *machine,
u64 offset, ssize_t *ret)
{
u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
struct dso_cache *cache;
struct dso_cache *old;
cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
if (!cache) {
*ret = -ENOMEM;
return NULL;
}
#ifdef HAVE_LIBBPF_SUPPORT
if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
*ret = bpf_read(dso, cache_offset, cache->data);
else
#endif
if (dso->binary_type == DSO_BINARY_TYPE__OOL)
*ret = DSO__DATA_CACHE_SIZE;
else
*ret = file_read(dso, machine, cache_offset, cache->data);
if (*ret <= 0) {
free(cache);
return NULL;
}
cache->offset = cache_offset;
cache->size = *ret;
old = dso_cache__insert(dso, cache);
if (old) {
/* we lose the race */
free(cache);
cache = old;
}
return cache;
}
static struct dso_cache *dso_cache__find(struct dso *dso,
struct machine *machine,
u64 offset,
ssize_t *ret)
{
struct dso_cache *cache = __dso_cache__find(dso, offset);
return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
}
static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size, bool out)
{
struct dso_cache *cache;
ssize_t ret = 0;
cache = dso_cache__find(dso, machine, offset, &ret);
if (!cache)
return ret;
return dso_cache__memcpy(cache, offset, data, size, out);
}
/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks stored
 * in the rb_tree. Any read of already cached data is served from the
 * cache. Writes update the cache only, not the backing file.
*/
static ssize_t cached_io(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size, bool out)
{
ssize_t r = 0;
u8 *p = data;
do {
ssize_t ret;
ret = dso_cache_io(dso, machine, offset, p, size, out);
if (ret < 0)
return ret;
/* Reached EOF, return what we have. */
if (!ret)
break;
BUG_ON(ret > size);
r += ret;
p += ret;
offset += ret;
size -= ret;
} while (size);
return r;
}
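/*
 * Illustrative example (editor's addition, assuming DSO__DATA_CACHE_SIZE
 * is 4096 and DSO__DATA_CACHE_MASK clears the low 12 bits): a 100-byte
 * read at offset 0x1234 looks up the cache chunk covering [0x1000,
 * 0x2000); on a miss dso_cache__populate() reads and inserts that whole
 * chunk, then dso_cache__memcpy() copies the 100 requested bytes starting
 * at chunk offset 0x234.
 */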
static int file_size(struct dso *dso, struct machine *machine)
{
int ret = 0;
struct stat st;
char sbuf[STRERR_BUFSIZE];
pthread_mutex_lock(&dso__data_open_lock);
/*
	 * dso->data.fd might have been closed if another thread opened a
	 * different file (dso) due to the open file limit (RLIMIT_NOFILE).
*/
try_to_open_dso(dso, machine);
if (dso->data.fd < 0) {
ret = -errno;
dso->data.status = DSO_DATA_STATUS_ERROR;
goto out;
}
if (fstat(dso->data.fd, &st) < 0) {
ret = -errno;
pr_err("dso cache fstat failed: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
dso->data.status = DSO_DATA_STATUS_ERROR;
goto out;
}
dso->data.file_size = st.st_size;
out:
pthread_mutex_unlock(&dso__data_open_lock);
return ret;
}
int dso__data_file_size(struct dso *dso, struct machine *machine)
{
if (dso->data.file_size)
return 0;
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
#ifdef HAVE_LIBBPF_SUPPORT
if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
return bpf_size(dso);
#endif
return file_size(dso, machine);
}
/**
* dso__data_size - Return dso data size
* @dso: dso object
* @machine: machine object
*
* Return: dso data size
*/
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
if (dso__data_file_size(dso, machine))
return -1;
	/* For now just estimate that the dso data size is close to the file size */
return dso->data.file_size;
}
static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size,
bool out)
{
if (dso__data_file_size(dso, machine))
return -1;
/* Check the offset sanity. */
if (offset > dso->data.file_size)
return -1;
if (offset + size < offset)
return -1;
return cached_io(dso, machine, offset, data, size, out);
}
/**
* dso__data_read_offset - Read data from dso file offset
* @dso: dso object
* @machine: machine object
* @offset: file offset
* @data: buffer to store data
* @size: size of the @data buffer
*
 * External interface to read data from a dso file offset. Opens the
 * dso data file and uses cached_io() to get the data.
*/
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size)
{
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
return data_read_write_offset(dso, machine, offset, data, size, true);
}
/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map that maps @addr to a dso file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso virtual memory address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
struct machine *machine, u64 addr,
u8 *data, ssize_t size)
{
u64 offset = map__map_ip(map, addr);
return dso__data_read_offset(dso, machine, offset, data, size);
}
/**
* dso__data_write_cache_offs - Write data to dso data cache at file offset
* @dso: dso object
* @machine: machine object
* @offset: file offset
 * @data_in: buffer to write
 * @size: size of the @data_in buffer
*
* Write into the dso file data cache, but do not change the file itself.
*/
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
u64 offset, const u8 *data_in, ssize_t size)
{
u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
return data_read_write_offset(dso, machine, offset, data, size, false);
}
/**
 * dso__data_write_cache_addr - Write data to dso data cache at dso address
 * @dso: dso object
 * @map: map that maps @addr to a dso file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * External interface to write into the dso file data cache, but do not change
 * the file itself.
 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
struct machine *machine, u64 addr,
const u8 *data, ssize_t size)
{
u64 offset = map__map_ip(map, addr);
return dso__data_write_cache_offs(dso, machine, offset, data, size);
}
struct map *dso__new_map(const char *name)
{
struct map *map = NULL;
struct dso *dso = dso__new(name);
if (dso) {
map = map__new2(0, dso);
dso__put(dso);
}
return map;
}
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
const char *short_name, int dso_type)
{
/*
* The kernel dso could be created by build_id processing.
*/
struct dso *dso = machine__findnew_dso(machine, name);
/*
* We need to run this in all cases, since during the build_id
* processing we had no idea this was the kernel dso.
*/
if (dso != NULL) {
dso__set_short_name(dso, short_name, false);
dso->kernel = dso_type;
}
return dso;
}
static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
{
struct rb_root *root = dso->root;
if (name == NULL)
return;
if (dso->long_name_allocated)
free((char *)dso->long_name);
if (root) {
rb_erase(&dso->rb_node, root);
/*
* __dsos__findnew_link_by_longname_id() isn't guaranteed to
* add it back, so a clean removal is required here.
*/
RB_CLEAR_NODE(&dso->rb_node);
dso->root = NULL;
}
dso->long_name = name;
dso->long_name_len = strlen(name);
dso->long_name_allocated = name_allocated;
if (root)
__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
}
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
dso__set_long_name_id(dso, name, NULL, name_allocated);
}
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
if (name == NULL)
return;
if (dso->short_name_allocated)
free((char *)dso->short_name);
dso->short_name = name;
dso->short_name_len = strlen(name);
dso->short_name_allocated = name_allocated;
}
int dso__name_len(const struct dso *dso)
{
if (!dso)
return strlen("[unknown]");
if (verbose > 0)
return dso->long_name_len;
return dso->short_name_len;
}
bool dso__loaded(const struct dso *dso)
{
return dso->loaded;
}
bool dso__sorted_by_name(const struct dso *dso)
{
return dso->sorted_by_name;
}
void dso__set_sorted_by_name(struct dso *dso)
{
dso->sorted_by_name = true;
}
struct dso *dso__new_id(const char *name, struct dso_id *id)
{
struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
if (dso != NULL) {
strcpy(dso->name, name);
if (id)
dso->id = *id;
dso__set_long_name_id(dso, dso->name, id, false);
dso__set_short_name(dso, dso->name, false);
dso->symbols = RB_ROOT_CACHED;
dso->symbol_names = NULL;
dso->symbol_names_len = 0;
dso->data.cache = RB_ROOT;
dso->inlined_nodes = RB_ROOT_CACHED;
dso->srclines = RB_ROOT_CACHED;
dso->data.fd = -1;
dso->data.status = DSO_DATA_STATUS_UNKNOWN;
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
dso->is_64_bit = (sizeof(void *) == 8);
dso->loaded = 0;
dso->rel = 0;
dso->sorted_by_name = 0;
dso->has_build_id = 0;
dso->has_srcline = 1;
dso->a2l_fails = 1;
dso->kernel = DSO_SPACE__USER;
dso->needs_swap = DSO_SWAP__UNSET;
dso->comp = COMP_ID__NONE;
RB_CLEAR_NODE(&dso->rb_node);
dso->root = NULL;
INIT_LIST_HEAD(&dso->node);
INIT_LIST_HEAD(&dso->data.open_entry);
mutex_init(&dso->lock);
refcount_set(&dso->refcnt, 1);
}
return dso;
}
struct dso *dso__new(const char *name)
{
return dso__new_id(name, NULL);
}
void dso__delete(struct dso *dso)
{
if (!RB_EMPTY_NODE(&dso->rb_node))
pr_err("DSO %s is still in rbtree when being deleted!\n",
dso->long_name);
/* free inlines first, as they reference symbols */
inlines__tree_delete(&dso->inlined_nodes);
srcline__tree_delete(&dso->srclines);
symbols__delete(&dso->symbols);
dso->symbol_names_len = 0;
zfree(&dso->symbol_names);
if (dso->short_name_allocated) {
zfree((char **)&dso->short_name);
dso->short_name_allocated = false;
}
if (dso->long_name_allocated) {
zfree((char **)&dso->long_name);
dso->long_name_allocated = false;
}
dso__data_close(dso);
auxtrace_cache__free(dso->auxtrace_cache);
dso_cache__free(dso);
dso__free_a2l(dso);
zfree(&dso->symsrc_filename);
nsinfo__zput(dso->nsinfo);
mutex_destroy(&dso->lock);
free(dso);
}
struct dso *dso__get(struct dso *dso)
{
if (dso)
refcount_inc(&dso->refcnt);
return dso;
}
void dso__put(struct dso *dso)
{
if (dso && refcount_dec_and_test(&dso->refcnt))
dso__delete(dso);
}
void dso__set_build_id(struct dso *dso, struct build_id *bid)
{
dso->bid = *bid;
dso->has_build_id = 1;
}
bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
{
if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
/*
		 * For backward compatibility, a build-id is allowed to have
		 * trailing zeros.
*/
return !memcmp(dso->bid.data, bid->data, bid->size) &&
!memchr_inv(&dso->bid.data[bid->size], 0,
dso->bid.size - bid->size);
}
return dso->bid.size == bid->size &&
memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
}
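/*
 * Illustrative example (editor's addition, assuming BUILD_ID_SIZE is 20):
 * a stored 20-byte build-id compares equal to a shorter 16-byte one as
 * long as the first 16 bytes match and the trailing 4 bytes of the stored
 * id are all zero.
 */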
void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
char path[PATH_MAX];
if (machine__is_default_guest(machine))
return;
sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
if (sysfs__read_build_id(path, &dso->bid) == 0)
dso->has_build_id = true;
}
int dso__kernel_module_get_build_id(struct dso *dso,
const char *root_dir)
{
char filename[PATH_MAX];
/*
* kernel module short names are of the form "[module]" and
* we need just "module" here.
*/
const char *name = dso->short_name + 1;
snprintf(filename, sizeof(filename),
"%s/sys/module/%.*s/notes/.note.gnu.build-id",
root_dir, (int)strlen(name) - 1, name);
if (sysfs__read_build_id(filename, &dso->bid) == 0)
dso->has_build_id = true;
return 0;
}
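/*
 * Illustrative example (editor's addition): for a module dso with the
 * short name "[ext4]" and an empty root_dir, the build-id is read from
 * "/sys/module/ext4/notes/.note.gnu.build-id".
 */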
static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
char sbuild_id[SBUILD_ID_SIZE];
build_id__sprintf(&dso->bid, sbuild_id);
return fprintf(fp, "%s", sbuild_id);
}
size_t dso__fprintf(struct dso *dso, FILE *fp)
{
struct rb_node *nd;
size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
if (dso->short_name != dso->long_name)
ret += fprintf(fp, "%s, ", dso->long_name);
ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
ret += dso__fprintf_buildid(dso, fp);
ret += fprintf(fp, ")\n");
for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
ret += symbol__fprintf(pos, fp);
}
return ret;
}
enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
int fd;
enum dso_type type = DSO__TYPE_UNKNOWN;
fd = dso__data_get_fd(dso, machine);
if (fd >= 0) {
type = dso__type_fd(fd);
dso__data_put_fd(dso);
}
return type;
}
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
int idx, errnum = dso->load_errno;
/*
	 * This must have the same ordering as the enum dso_load_errno.
*/
static const char *dso_load__error_str[] = {
"Internal tools/perf/ library error",
"Invalid ELF file",
"Can not read build id",
"Mismatching build id",
"Decompression failure",
};
BUG_ON(buflen == 0);
if (errnum >= 0) {
const char *err = str_error_r(errnum, buf, buflen);
if (err != buf)
scnprintf(buf, buflen, "%s", err);
return 0;
}
if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
return -1;
idx = errnum - __DSO_LOAD_ERRNO__START;
scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
return 0;
}
| linux-master | tools/perf/util/dso.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <linux/kernel.h>
#include "vdso.h"
#include "dso.h"
#include <internal/lib.h>
#include "map.h"
#include "symbol.h"
#include "machine.h"
#include "thread.h"
#include "linux/string.h"
#include <linux/zalloc.h>
#include "debug.h"
/*
* Include definition of find_map() also used in perf-read-vdso.c for
* building perf-read-vdso32 and perf-read-vdsox32.
*/
#include "find-map.c"
#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX"
struct vdso_file {
bool found;
bool error;
char temp_file_name[sizeof(VDSO__TEMP_FILE_NAME)];
const char *dso_name;
const char *read_prog;
};
struct vdso_info {
struct vdso_file vdso;
#if BITS_PER_LONG == 64
struct vdso_file vdso32;
struct vdso_file vdsox32;
#endif
};
static struct vdso_info *vdso_info__new(void)
{
static const struct vdso_info vdso_info_init = {
.vdso = {
.temp_file_name = VDSO__TEMP_FILE_NAME,
.dso_name = DSO__NAME_VDSO,
},
#if BITS_PER_LONG == 64
.vdso32 = {
.temp_file_name = VDSO__TEMP_FILE_NAME,
.dso_name = DSO__NAME_VDSO32,
.read_prog = "perf-read-vdso32",
},
.vdsox32 = {
.temp_file_name = VDSO__TEMP_FILE_NAME,
.dso_name = DSO__NAME_VDSOX32,
.read_prog = "perf-read-vdsox32",
},
#endif
};
return memdup(&vdso_info_init, sizeof(vdso_info_init));
}
static char *get_file(struct vdso_file *vdso_file)
{
char *vdso = NULL;
char *buf = NULL;
void *start, *end;
size_t size;
int fd;
if (vdso_file->found)
return vdso_file->temp_file_name;
if (vdso_file->error || find_map(&start, &end, VDSO__MAP_NAME))
return NULL;
size = end - start;
buf = memdup(start, size);
if (!buf)
return NULL;
fd = mkstemp(vdso_file->temp_file_name);
if (fd < 0)
goto out;
if (size == (size_t) write(fd, buf, size))
vdso = vdso_file->temp_file_name;
close(fd);
out:
free(buf);
vdso_file->found = (vdso != NULL);
vdso_file->error = !vdso_file->found;
return vdso;
}
void machine__exit_vdso(struct machine *machine)
{
struct vdso_info *vdso_info = machine->vdso_info;
if (!vdso_info)
return;
if (vdso_info->vdso.found)
unlink(vdso_info->vdso.temp_file_name);
#if BITS_PER_LONG == 64
if (vdso_info->vdso32.found)
unlink(vdso_info->vdso32.temp_file_name);
if (vdso_info->vdsox32.found)
unlink(vdso_info->vdsox32.temp_file_name);
#endif
zfree(&machine->vdso_info);
}
static struct dso *__machine__addnew_vdso(struct machine *machine, const char *short_name,
const char *long_name)
{
struct dso *dso;
dso = dso__new(short_name);
if (dso != NULL) {
__dsos__add(&machine->dsos, dso);
dso__set_long_name(dso, long_name, false);
		/* Put dso here because __dsos__add() already got it */
dso__put(dso);
}
return dso;
}
static enum dso_type machine__thread_dso_type(struct machine *machine,
struct thread *thread)
{
enum dso_type dso_type = DSO__TYPE_UNKNOWN;
struct map_rb_node *rb_node;
maps__for_each_entry(thread__maps(thread), rb_node) {
struct dso *dso = map__dso(rb_node->map);
if (!dso || dso->long_name[0] != '/')
continue;
dso_type = dso__type(dso, machine);
if (dso_type != DSO__TYPE_UNKNOWN)
break;
}
return dso_type;
}
#if BITS_PER_LONG == 64
static int vdso__do_copy_compat(FILE *f, int fd)
{
char buf[4096];
size_t count;
while (1) {
count = fread(buf, 1, sizeof(buf), f);
if (ferror(f))
return -errno;
if (feof(f))
break;
if (count && writen(fd, buf, count) != (ssize_t)count)
return -errno;
}
return 0;
}
static int vdso__copy_compat(const char *prog, int fd)
{
FILE *f;
int err;
f = popen(prog, "r");
if (!f)
return -errno;
err = vdso__do_copy_compat(f, fd);
if (pclose(f) == -1)
return -errno;
return err;
}
static int vdso__create_compat_file(const char *prog, char *temp_name)
{
int fd, err;
fd = mkstemp(temp_name);
if (fd < 0)
return -errno;
err = vdso__copy_compat(prog, fd);
if (close(fd) == -1)
return -errno;
return err;
}
static const char *vdso__get_compat_file(struct vdso_file *vdso_file)
{
int err;
if (vdso_file->found)
return vdso_file->temp_file_name;
if (vdso_file->error)
return NULL;
err = vdso__create_compat_file(vdso_file->read_prog,
vdso_file->temp_file_name);
if (err) {
pr_err("%s failed, error %d\n", vdso_file->read_prog, err);
vdso_file->error = true;
return NULL;
}
vdso_file->found = true;
return vdso_file->temp_file_name;
}
static struct dso *__machine__findnew_compat(struct machine *machine,
struct vdso_file *vdso_file)
{
const char *file_name;
struct dso *dso;
dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true);
if (dso)
goto out;
file_name = vdso__get_compat_file(vdso_file);
if (!file_name)
goto out;
dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name);
out:
return dso;
}
static int __machine__findnew_vdso_compat(struct machine *machine,
struct thread *thread,
struct vdso_info *vdso_info,
struct dso **dso)
{
enum dso_type dso_type;
dso_type = machine__thread_dso_type(machine, thread);
#ifndef HAVE_PERF_READ_VDSO32
if (dso_type == DSO__TYPE_32BIT)
return 0;
#endif
#ifndef HAVE_PERF_READ_VDSOX32
if (dso_type == DSO__TYPE_X32BIT)
return 0;
#endif
switch (dso_type) {
case DSO__TYPE_32BIT:
*dso = __machine__findnew_compat(machine, &vdso_info->vdso32);
return 1;
case DSO__TYPE_X32BIT:
*dso = __machine__findnew_compat(machine, &vdso_info->vdsox32);
return 1;
case DSO__TYPE_UNKNOWN:
case DSO__TYPE_64BIT:
default:
return 0;
}
}
#endif
static struct dso *machine__find_vdso(struct machine *machine,
struct thread *thread)
{
struct dso *dso = NULL;
enum dso_type dso_type;
dso_type = machine__thread_dso_type(machine, thread);
switch (dso_type) {
case DSO__TYPE_32BIT:
dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO32, true);
if (!dso) {
dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO,
true);
if (dso && dso_type != dso__type(dso, machine))
dso = NULL;
}
break;
case DSO__TYPE_X32BIT:
dso = __dsos__find(&machine->dsos, DSO__NAME_VDSOX32, true);
break;
case DSO__TYPE_64BIT:
case DSO__TYPE_UNKNOWN:
default:
dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO, true);
break;
}
return dso;
}
struct dso *machine__findnew_vdso(struct machine *machine,
struct thread *thread)
{
struct vdso_info *vdso_info;
struct dso *dso = NULL;
down_write(&machine->dsos.lock);
if (!machine->vdso_info)
machine->vdso_info = vdso_info__new();
vdso_info = machine->vdso_info;
if (!vdso_info)
goto out_unlock;
dso = machine__find_vdso(machine, thread);
if (dso)
goto out_unlock;
#if BITS_PER_LONG == 64
if (__machine__findnew_vdso_compat(machine, thread, vdso_info, &dso))
goto out_unlock;
#endif
dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO, true);
if (!dso) {
char *file;
file = get_file(&vdso_info->vdso);
if (file)
dso = __machine__addnew_vdso(machine, DSO__NAME_VDSO, file);
}
out_unlock:
dso__get(dso);
up_write(&machine->dsos.lock);
return dso;
}
bool dso__is_vdso(struct dso *dso)
{
return !strcmp(dso->short_name, DSO__NAME_VDSO) ||
!strcmp(dso->short_name, DSO__NAME_VDSO32) ||
!strcmp(dso->short_name, DSO__NAME_VDSOX32);
}
| linux-master | tools/perf/util/vdso.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <elfutils/libdw.h>
#include <elfutils/libdwfl.h>
#include <inttypes.h>
#include <errno.h>
#include "debug.h"
#include "dso.h"
#include "unwind.h"
#include "unwind-libdw.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "thread.h"
#include <linux/types.h>
#include <linux/zalloc.h>
#include "event.h"
#include "perf_regs.h"
#include "callchain.h"
#include "util/env.h"
static char *debuginfo_path;
static int __find_debuginfo(Dwfl_Module *mod __maybe_unused, void **userdata,
const char *modname __maybe_unused, Dwarf_Addr base __maybe_unused,
const char *file_name, const char *debuglink_file __maybe_unused,
GElf_Word debuglink_crc __maybe_unused, char **debuginfo_file_name)
{
const struct dso *dso = *userdata;
assert(dso);
if (dso->symsrc_filename && strcmp (file_name, dso->symsrc_filename))
*debuginfo_file_name = strdup(dso->symsrc_filename);
return -1;
}
static const Dwfl_Callbacks offline_callbacks = {
.find_debuginfo = __find_debuginfo,
.debuginfo_path = &debuginfo_path,
.section_address = dwfl_offline_section_address,
// .find_elf is not set as we use dwfl_report_elf() instead.
};
static int __report_module(struct addr_location *al, u64 ip,
struct unwind_info *ui)
{
Dwfl_Module *mod;
struct dso *dso = NULL;
/*
* Some callers will use al->sym, so we can't just use the
* cheaper thread__find_map() here.
*/
thread__find_symbol(ui->thread, PERF_RECORD_MISC_USER, ip, al);
if (al->map)
dso = map__dso(al->map);
if (!dso)
return 0;
mod = dwfl_addrmodule(ui->dwfl, ip);
if (mod) {
Dwarf_Addr s;
dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
if (s != map__start(al->map) - map__pgoff(al->map))
			mod = NULL;
}
if (!mod) {
char filename[PATH_MAX];
__symbol__join_symfs(filename, sizeof(filename), dso->long_name);
mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
map__start(al->map) - map__pgoff(al->map), false);
}
if (!mod) {
char filename[PATH_MAX];
if (dso__build_id_filename(dso, filename, sizeof(filename), false))
mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
map__start(al->map) - map__pgoff(al->map), false);
}
if (mod) {
void **userdatap;
dwfl_module_info(mod, &userdatap, NULL, NULL, NULL, NULL, NULL, NULL);
*userdatap = dso;
}
return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
}
static int report_module(u64 ip, struct unwind_info *ui)
{
struct addr_location al;
int res;
addr_location__init(&al);
res = __report_module(&al, ip, ui);
addr_location__exit(&al);
return res;
}
/*
 * Store all entries within the entries array;
 * we will process them after the unwind finishes.
*/
static int entry(u64 ip, struct unwind_info *ui)
{
struct unwind_entry *e = &ui->entries[ui->idx++];
struct addr_location al;
addr_location__init(&al);
if (__report_module(&al, ip, ui)) {
addr_location__exit(&al);
return -1;
}
e->ip = ip;
e->ms.maps = al.maps;
e->ms.map = al.map;
e->ms.sym = al.sym;
pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
al.sym ? al.sym->name : "''",
ip,
al.map ? map__map_ip(al.map, ip) : (u64) 0);
addr_location__exit(&al);
return 0;
}
static pid_t next_thread(Dwfl *dwfl, void *arg, void **thread_argp)
{
	/* We want only a single thread to be processed. */
if (*thread_argp != NULL)
return 0;
*thread_argp = arg;
return dwfl_pid(dwfl);
}
static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr,
Dwarf_Word *data)
{
struct addr_location al;
ssize_t size;
struct dso *dso;
addr_location__init(&al);
if (!thread__find_map(ui->thread, PERF_RECORD_MISC_USER, addr, &al)) {
pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
goto out_fail;
}
dso = map__dso(al.map);
if (!dso)
goto out_fail;
size = dso__data_read_addr(dso, al.map, ui->machine, addr, (u8 *) data, sizeof(*data));
addr_location__exit(&al);
return !(size == sizeof(*data));
out_fail:
addr_location__exit(&al);
return -1;
}
static bool memory_read(Dwfl *dwfl __maybe_unused, Dwarf_Addr addr, Dwarf_Word *result,
void *arg)
{
struct unwind_info *ui = arg;
const char *arch = perf_env__arch(ui->machine->env);
struct stack_dump *stack = &ui->sample->user_stack;
u64 start, end;
int offset;
int ret;
ret = perf_reg_value(&start, &ui->sample->user_regs,
perf_arch_reg_sp(arch));
if (ret)
return false;
end = start + stack->size;
/* Check overflow. */
if (addr + sizeof(Dwarf_Word) < addr)
return false;
if (addr < start || addr + sizeof(Dwarf_Word) > end) {
ret = access_dso_mem(ui, addr, result);
if (ret) {
pr_debug("unwind: access_mem 0x%" PRIx64 " not inside range"
" 0x%" PRIx64 "-0x%" PRIx64 "\n",
addr, start, end);
return false;
}
return true;
}
offset = addr - start;
*result = *(Dwarf_Word *)&stack->data[offset];
pr_debug("unwind: access_mem addr 0x%" PRIx64 ", val %lx, offset %d\n",
addr, (unsigned long)*result, offset);
return true;
}
static const Dwfl_Thread_Callbacks callbacks = {
.next_thread = next_thread,
.memory_read = memory_read,
.set_initial_registers = libdw__arch_set_initial_registers,
};
static int
frame_callback(Dwfl_Frame *state, void *arg)
{
struct unwind_info *ui = arg;
Dwarf_Addr pc;
bool isactivation;
if (!dwfl_frame_pc(state, &pc, NULL)) {
if (!ui->best_effort)
pr_err("%s", dwfl_errmsg(-1));
return DWARF_CB_ABORT;
}
// report the module before we query for isactivation
report_module(pc, ui);
if (!dwfl_frame_pc(state, &pc, &isactivation)) {
if (!ui->best_effort)
pr_err("%s", dwfl_errmsg(-1));
return DWARF_CB_ABORT;
}
if (!isactivation)
--pc;
return entry(pc, ui) || !(--ui->max_stack) ?
DWARF_CB_ABORT : DWARF_CB_OK;
}
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct thread *thread,
struct perf_sample *data,
int max_stack,
bool best_effort)
{
struct unwind_info *ui, ui_buf = {
.sample = data,
.thread = thread,
.machine = RC_CHK_ACCESS(thread__maps(thread))->machine,
.cb = cb,
.arg = arg,
.max_stack = max_stack,
.best_effort = best_effort
};
const char *arch = perf_env__arch(ui_buf.machine->env);
Dwarf_Word ip;
int err = -EINVAL, i;
if (!data->user_regs.regs)
return -EINVAL;
ui = zalloc(sizeof(ui_buf) + sizeof(ui_buf.entries[0]) * max_stack);
if (!ui)
return -ENOMEM;
*ui = ui_buf;
ui->dwfl = dwfl_begin(&offline_callbacks);
if (!ui->dwfl)
goto out;
err = perf_reg_value(&ip, &data->user_regs, perf_arch_reg_ip(arch));
if (err)
goto out;
err = report_module(ip, ui);
if (err)
goto out;
err = !dwfl_attach_state(ui->dwfl, EM_NONE, thread__tid(thread), &callbacks, ui);
if (err)
goto out;
err = dwfl_getthread_frames(ui->dwfl, thread__tid(thread), frame_callback, ui);
if (err && ui->max_stack != max_stack)
err = 0;
/*
* Display what we got based on the order setup.
*/
for (i = 0; i < ui->idx && !err; i++) {
int j = i;
if (callchain_param.order == ORDER_CALLER)
j = ui->idx - i - 1;
err = ui->entries[j].ip ? ui->cb(&ui->entries[j], ui->arg) : 0;
}
out:
if (err)
pr_debug("unwind: failed with '%s'\n", dwfl_errmsg(-1));
dwfl_end(ui->dwfl);
free(ui);
return 0;
}
| linux-master | tools/perf/util/unwind-libdw.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* db-export.c: Support for exporting data suitable for import to a database
* Copyright (c) 2014, Intel Corporation.
*/
#include <errno.h>
#include <stdlib.h>
#include "dso.h"
#include "evsel.h"
#include "machine.h"
#include "thread.h"
#include "comm.h"
#include "symbol.h"
#include "map.h"
#include "event.h"
#include "thread-stack.h"
#include "callchain.h"
#include "call-path.h"
#include "db-export.h"
#include <linux/zalloc.h>
int db_export__init(struct db_export *dbe)
{
memset(dbe, 0, sizeof(struct db_export));
return 0;
}
void db_export__exit(struct db_export *dbe)
{
call_return_processor__free(dbe->crp);
dbe->crp = NULL;
}
int db_export__evsel(struct db_export *dbe, struct evsel *evsel)
{
if (evsel->db_id)
return 0;
evsel->db_id = ++dbe->evsel_last_db_id;
if (dbe->export_evsel)
return dbe->export_evsel(dbe, evsel);
return 0;
}
int db_export__machine(struct db_export *dbe, struct machine *machine)
{
if (machine->db_id)
return 0;
machine->db_id = ++dbe->machine_last_db_id;
if (dbe->export_machine)
return dbe->export_machine(dbe, machine);
return 0;
}
int db_export__thread(struct db_export *dbe, struct thread *thread,
struct machine *machine, struct thread *main_thread)
{
u64 main_thread_db_id = 0;
if (thread__db_id(thread))
return 0;
thread__set_db_id(thread, ++dbe->thread_last_db_id);
if (main_thread)
main_thread_db_id = thread__db_id(main_thread);
if (dbe->export_thread)
return dbe->export_thread(dbe, thread, main_thread_db_id,
machine);
return 0;
}
static int __db_export__comm(struct db_export *dbe, struct comm *comm,
struct thread *thread)
{
comm->db_id = ++dbe->comm_last_db_id;
if (dbe->export_comm)
return dbe->export_comm(dbe, comm, thread);
return 0;
}
int db_export__comm(struct db_export *dbe, struct comm *comm,
struct thread *thread)
{
if (comm->db_id)
return 0;
return __db_export__comm(dbe, comm, thread);
}
/*
* Export the "exec" comm. The "exec" comm is the program / application command
* name at the time it first executes. It is used to group threads for the same
* program. Note that the main thread pid (or thread group id tgid) cannot be
* used because it does not change when a new program is exec'ed.
*/
int db_export__exec_comm(struct db_export *dbe, struct comm *comm,
struct thread *main_thread)
{
int err;
if (comm->db_id)
return 0;
err = __db_export__comm(dbe, comm, main_thread);
if (err)
return err;
/*
* Record the main thread for this comm. Note that the main thread can
* have many "exec" comms because there will be a new one every time it
* exec's. An "exec" comm however will only ever have 1 main thread.
* That is different to any other threads for that same program because
* exec() will effectively kill them, so the relationship between the
* "exec" comm and non-main threads is 1-to-1. That is why
* db_export__comm_thread() is called here for the main thread, but it
* is called for non-main threads when they are exported.
*/
return db_export__comm_thread(dbe, comm, main_thread);
}
int db_export__comm_thread(struct db_export *dbe, struct comm *comm,
struct thread *thread)
{
u64 db_id;
db_id = ++dbe->comm_thread_last_db_id;
if (dbe->export_comm_thread)
return dbe->export_comm_thread(dbe, db_id, comm, thread);
return 0;
}
int db_export__dso(struct db_export *dbe, struct dso *dso,
struct machine *machine)
{
if (dso->db_id)
return 0;
dso->db_id = ++dbe->dso_last_db_id;
if (dbe->export_dso)
return dbe->export_dso(dbe, dso, machine);
return 0;
}
int db_export__symbol(struct db_export *dbe, struct symbol *sym,
struct dso *dso)
{
u64 *sym_db_id = symbol__priv(sym);
if (*sym_db_id)
return 0;
*sym_db_id = ++dbe->symbol_last_db_id;
if (dbe->export_symbol)
return dbe->export_symbol(dbe, sym, dso);
return 0;
}
static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
u64 *dso_db_id, u64 *sym_db_id, u64 *offset)
{
int err;
if (al->map) {
struct dso *dso = map__dso(al->map);
err = db_export__dso(dbe, dso, maps__machine(al->maps));
if (err)
return err;
*dso_db_id = dso->db_id;
if (!al->sym) {
al->sym = symbol__new(al->addr, 0, 0, 0, "unknown");
if (al->sym)
dso__insert_symbol(dso, al->sym);
}
if (al->sym) {
u64 *db_id = symbol__priv(al->sym);
err = db_export__symbol(dbe, al->sym, dso);
if (err)
return err;
*sym_db_id = *db_id;
*offset = al->addr - al->sym->start;
}
}
return 0;
}
static struct call_path *call_path_from_sample(struct db_export *dbe,
struct machine *machine,
struct thread *thread,
struct perf_sample *sample,
struct evsel *evsel)
{
u64 kernel_start = machine__kernel_start(machine);
struct call_path *current = &dbe->cpr->call_path;
enum chain_order saved_order = callchain_param.order;
struct callchain_cursor *cursor;
int err;
if (!symbol_conf.use_callchain || !sample->callchain)
return NULL;
/*
* Since the call path tree must be built starting with the root, we
	 * must use ORDER_CALLER for call chain resolution, in order to process
* the callchain starting with the root node and ending with the leaf.
*/
callchain_param.order = ORDER_CALLER;
cursor = get_tls_callchain_cursor();
err = thread__resolve_callchain(thread, cursor, evsel,
sample, NULL, NULL, PERF_MAX_STACK_DEPTH);
if (err) {
callchain_param.order = saved_order;
return NULL;
}
callchain_cursor_commit(cursor);
while (1) {
struct callchain_cursor_node *node;
struct addr_location al;
u64 dso_db_id = 0, sym_db_id = 0, offset = 0;
node = callchain_cursor_current(cursor);
if (!node)
break;
/*
* Handle export of symbol and dso for this node by
* constructing an addr_location struct and then passing it to
* db_ids_from_al() to perform the export.
*/
addr_location__init(&al);
al.sym = node->ms.sym;
al.map = node->ms.map;
al.maps = thread__maps(thread);
al.addr = node->ip;
if (al.map && !al.sym)
al.sym = dso__find_symbol(map__dso(al.map), al.addr);
db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset);
/* add node to the call path tree if it doesn't exist */
current = call_path__findnew(dbe->cpr, current,
al.sym, node->ip,
kernel_start);
callchain_cursor_advance(cursor);
addr_location__exit(&al);
}
/* Reset the callchain order to its prior value. */
callchain_param.order = saved_order;
if (current == &dbe->cpr->call_path) {
/* Bail because the callchain was empty. */
return NULL;
}
return current;
}
int db_export__branch_type(struct db_export *dbe, u32 branch_type,
const char *name)
{
if (dbe->export_branch_type)
return dbe->export_branch_type(dbe, branch_type, name);
return 0;
}
static int db_export__threads(struct db_export *dbe, struct thread *thread,
struct thread *main_thread,
struct machine *machine, struct comm **comm_ptr)
{
struct comm *comm = NULL;
struct comm *curr_comm;
int err;
if (main_thread) {
/*
* A thread has a reference to the main thread, so export the
* main thread first.
*/
err = db_export__thread(dbe, main_thread, machine, main_thread);
if (err)
return err;
/*
* Export comm before exporting the non-main thread because
* db_export__comm_thread() can be called further below.
*/
comm = machine__thread_exec_comm(machine, main_thread);
if (comm) {
err = db_export__exec_comm(dbe, comm, main_thread);
if (err)
return err;
*comm_ptr = comm;
}
}
if (thread != main_thread) {
/*
* For a non-main thread, db_export__comm_thread() must be
* called only if thread has not previously been exported.
*/
bool export_comm_thread = comm && !thread__db_id(thread);
err = db_export__thread(dbe, thread, machine, main_thread);
if (err)
return err;
if (export_comm_thread) {
err = db_export__comm_thread(dbe, comm, thread);
if (err)
return err;
}
}
curr_comm = thread__comm(thread);
if (curr_comm)
return db_export__comm(dbe, curr_comm, thread);
return 0;
}
int db_export__sample(struct db_export *dbe, union perf_event *event,
struct perf_sample *sample, struct evsel *evsel,
struct addr_location *al, struct addr_location *addr_al)
{
struct thread *thread = al->thread;
struct export_sample es = {
.event = event,
.sample = sample,
.evsel = evsel,
.al = al,
};
struct thread *main_thread;
struct comm *comm = NULL;
struct machine *machine;
int err;
err = db_export__evsel(dbe, evsel);
if (err)
return err;
machine = maps__machine(al->maps);
err = db_export__machine(dbe, machine);
if (err)
return err;
main_thread = thread__main_thread(machine, thread);
err = db_export__threads(dbe, thread, main_thread, machine, &comm);
if (err)
goto out_put;
if (comm)
es.comm_db_id = comm->db_id;
es.db_id = ++dbe->sample_last_db_id;
err = db_ids_from_al(dbe, al, &es.dso_db_id, &es.sym_db_id, &es.offset);
if (err)
goto out_put;
if (dbe->cpr) {
struct call_path *cp = call_path_from_sample(dbe, machine,
thread, sample,
evsel);
if (cp) {
db_export__call_path(dbe, cp);
es.call_path_id = cp->db_id;
}
}
if (addr_al) {
err = db_ids_from_al(dbe, addr_al, &es.addr_dso_db_id,
&es.addr_sym_db_id, &es.addr_offset);
if (err)
goto out_put;
if (dbe->crp) {
err = thread_stack__process(thread, comm, sample, al,
addr_al, es.db_id,
dbe->crp);
if (err)
goto out_put;
}
}
if (dbe->export_sample)
err = dbe->export_sample(dbe, &es);
out_put:
thread__put(main_thread);
return err;
}
static struct {
u32 branch_type;
const char *name;
} branch_types[] = {
{0, "no branch"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "conditional jump"},
{PERF_IP_FLAG_BRANCH, "unconditional jump"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT,
"software interrupt"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT,
"return from interrupt"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET,
"system call"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET,
"return from system call"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "asynchronous branch"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT, "hardware interrupt"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "transaction abort"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "trace begin"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "trace end"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMENTRY, "vm entry"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMEXIT, "vm exit"},
{0, NULL}
};
int db_export__branch_types(struct db_export *dbe)
{
int i, err = 0;
for (i = 0; branch_types[i].name ; i++) {
err = db_export__branch_type(dbe, branch_types[i].branch_type,
branch_types[i].name);
if (err)
break;
}
/* Add trace begin / end variants */
for (i = 0; branch_types[i].name ; i++) {
const char *name = branch_types[i].name;
u32 type = branch_types[i].branch_type;
char buf[64];
if (type == PERF_IP_FLAG_BRANCH ||
(type & (PERF_IP_FLAG_TRACE_BEGIN | PERF_IP_FLAG_TRACE_END)))
continue;
snprintf(buf, sizeof(buf), "trace begin / %s", name);
err = db_export__branch_type(dbe, type | PERF_IP_FLAG_TRACE_BEGIN, buf);
if (err)
break;
snprintf(buf, sizeof(buf), "%s / trace end", name);
err = db_export__branch_type(dbe, type | PERF_IP_FLAG_TRACE_END, buf);
if (err)
break;
}
return err;
}
int db_export__call_path(struct db_export *dbe, struct call_path *cp)
{
int err;
if (cp->db_id)
return 0;
if (cp->parent) {
err = db_export__call_path(dbe, cp->parent);
if (err)
return err;
}
cp->db_id = ++dbe->call_path_last_db_id;
if (dbe->export_call_path)
return dbe->export_call_path(dbe, cp);
return 0;
}
int db_export__call_return(struct db_export *dbe, struct call_return *cr,
u64 *parent_db_id)
{
int err;
err = db_export__call_path(dbe, cr->cp);
if (err)
return err;
if (!cr->db_id)
cr->db_id = ++dbe->call_return_last_db_id;
if (parent_db_id) {
if (!*parent_db_id)
*parent_db_id = ++dbe->call_return_last_db_id;
cr->parent_db_id = *parent_db_id;
}
if (dbe->export_call_return)
return dbe->export_call_return(dbe, cr);
return 0;
}
static int db_export__pid_tid(struct db_export *dbe, struct machine *machine,
pid_t pid, pid_t tid, u64 *db_id,
struct comm **comm_ptr, bool *is_idle)
{
struct thread *thread = machine__find_thread(machine, pid, tid);
struct thread *main_thread;
int err = 0;
if (!thread || !thread__comm_set(thread))
goto out_put;
*is_idle = !thread__pid(thread) && !thread__tid(thread);
main_thread = thread__main_thread(machine, thread);
err = db_export__threads(dbe, thread, main_thread, machine, comm_ptr);
*db_id = thread__db_id(thread);
thread__put(main_thread);
out_put:
thread__put(thread);
return err;
}
int db_export__switch(struct db_export *dbe, union perf_event *event,
struct perf_sample *sample, struct machine *machine)
{
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
bool out_preempt = out &&
(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT);
int flags = out | (out_preempt << 1);
bool is_idle_a = false, is_idle_b = false;
u64 th_a_id = 0, th_b_id = 0;
u64 comm_out_id, comm_in_id;
struct comm *comm_a = NULL;
struct comm *comm_b = NULL;
u64 th_out_id, th_in_id;
u64 db_id;
int err;
err = db_export__machine(dbe, machine);
if (err)
return err;
err = db_export__pid_tid(dbe, machine, sample->pid, sample->tid,
&th_a_id, &comm_a, &is_idle_a);
if (err)
return err;
if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
pid_t pid = event->context_switch.next_prev_pid;
pid_t tid = event->context_switch.next_prev_tid;
err = db_export__pid_tid(dbe, machine, pid, tid, &th_b_id,
&comm_b, &is_idle_b);
if (err)
return err;
}
/*
* Do not export if both threads are unknown (i.e. not being traced),
* or one is unknown and the other is the idle task.
*/
if ((!th_a_id || is_idle_a) && (!th_b_id || is_idle_b))
return 0;
db_id = ++dbe->context_switch_last_db_id;
if (out) {
th_out_id = th_a_id;
th_in_id = th_b_id;
comm_out_id = comm_a ? comm_a->db_id : 0;
comm_in_id = comm_b ? comm_b->db_id : 0;
} else {
th_out_id = th_b_id;
th_in_id = th_a_id;
comm_out_id = comm_b ? comm_b->db_id : 0;
comm_in_id = comm_a ? comm_a->db_id : 0;
}
if (dbe->export_context_switch)
return dbe->export_context_switch(dbe, db_id, machine, sample,
th_out_id, comm_out_id,
th_in_id, comm_in_id, flags);
return 0;
}
| linux-master | tools/perf/util/db-export.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <linux/string.h>
#include <sys/time.h>
#include <linux/time64.h>
#include <time.h>
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <linux/ctype.h>
#include "debug.h"
#include "time-utils.h"
#include "session.h"
#include "evlist.h"
int parse_nsec_time(const char *str, u64 *ptime)
{
u64 time_sec, time_nsec;
char *end;
time_sec = strtoul(str, &end, 10);
if (*end != '.' && *end != '\0')
return -1;
if (*end == '.') {
int i;
char nsec_buf[10];
if (strlen(++end) > 9)
return -1;
strncpy(nsec_buf, end, 9);
nsec_buf[9] = '\0';
/* make it nsec precision */
for (i = strlen(nsec_buf); i < 9; i++)
nsec_buf[i] = '0';
time_nsec = strtoul(nsec_buf, &end, 10);
if (*end != '\0')
return -1;
} else
time_nsec = 0;
*ptime = time_sec * NSEC_PER_SEC + time_nsec;
return 0;
}
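/*
 * Illustrative example (editor's addition): parse_nsec_time("5.123", &t)
 * pads the fractional part to nanosecond precision, giving
 * t = 5 * NSEC_PER_SEC + 123000000, while plain "5" gives
 * t = 5 * NSEC_PER_SEC.
 */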
static int parse_timestr_sec_nsec(struct perf_time_interval *ptime,
char *start_str, char *end_str)
{
if (start_str && (*start_str != '\0') &&
(parse_nsec_time(start_str, &ptime->start) != 0)) {
return -1;
}
if (end_str && (*end_str != '\0') &&
(parse_nsec_time(end_str, &ptime->end) != 0)) {
return -1;
}
return 0;
}
static int split_start_end(char **start, char **end, const char *ostr, char ch)
{
char *start_str, *end_str;
char *d, *str;
if (ostr == NULL || *ostr == '\0')
return 0;
/* copy original string because we need to modify it */
str = strdup(ostr);
if (str == NULL)
return -ENOMEM;
start_str = str;
d = strchr(start_str, ch);
if (d) {
*d = '\0';
++d;
}
end_str = d;
*start = start_str;
*end = end_str;
return 0;
}
int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
{
char *start_str = NULL, *end_str;
int rc;
rc = split_start_end(&start_str, &end_str, ostr, ',');
if (rc || !start_str)
return rc;
ptime->start = 0;
ptime->end = 0;
rc = parse_timestr_sec_nsec(ptime, start_str, end_str);
free(start_str);
/* make sure end time is after start time if it was given */
if (rc == 0 && ptime->end && ptime->end < ptime->start)
return -EINVAL;
pr_debug("start time %" PRIu64 ", ", ptime->start);
pr_debug("end time %" PRIu64 "\n", ptime->end);
return rc;
}
static int perf_time__parse_strs(struct perf_time_interval *ptime,
const char *ostr, int size)
{
const char *cp;
char *str, *arg, *p;
int i, num = 0, rc = 0;
/* Count the commas */
for (cp = ostr; *cp; cp++)
num += !!(*cp == ',');
if (!num)
return -EINVAL;
BUG_ON(num > size);
str = strdup(ostr);
if (!str)
return -ENOMEM;
/* Split the string and parse each piece, except the last */
for (i = 0, p = str; i < num - 1; i++) {
arg = p;
/* Find next comma, there must be one */
p = skip_spaces(strchr(p, ',') + 1);
/* Skip the value, must not contain space or comma */
while (*p && !isspace(*p)) {
if (*p++ == ',') {
rc = -EINVAL;
goto out;
}
}
/* Split and parse */
if (*p)
*p++ = 0;
rc = perf_time__parse_str(ptime + i, arg);
if (rc < 0)
goto out;
}
/* Parse the last piece */
rc = perf_time__parse_str(ptime + i, p);
if (rc < 0)
goto out;
/* Check there is no overlap */
for (i = 0; i < num - 1; i++) {
if (ptime[i].end >= ptime[i + 1].start) {
rc = -EINVAL;
goto out;
}
}
rc = num;
out:
free(str);
return rc;
}
static int parse_percent(double *pcnt, char *str)
{
char *c, *endptr;
double d;
c = strchr(str, '%');
if (c)
*c = '\0';
else
return -1;
d = strtod(str, &endptr);
if (endptr != str + strlen(str))
return -1;
*pcnt = d / 100.0;
return 0;
}
static int set_percent_time(struct perf_time_interval *ptime, double start_pcnt,
double end_pcnt, u64 start, u64 end)
{
u64 total = end - start;
if (start_pcnt < 0.0 || start_pcnt > 1.0 ||
end_pcnt < 0.0 || end_pcnt > 1.0) {
return -1;
}
ptime->start = start + round(start_pcnt * total);
ptime->end = start + round(end_pcnt * total);
if (ptime->end > ptime->start && ptime->end != end)
ptime->end -= 1;
return 0;
}
static int percent_slash_split(char *str, struct perf_time_interval *ptime,
u64 start, u64 end)
{
char *p, *end_str;
double pcnt, start_pcnt, end_pcnt;
int i;
/*
* Example:
	 * 10%/2: select the second 10% slice
*/
/* We can modify this string since the original one is copied */
p = strchr(str, '/');
if (!p)
return -1;
*p = '\0';
if (parse_percent(&pcnt, str) < 0)
return -1;
p++;
i = (int)strtol(p, &end_str, 10);
if (*end_str)
return -1;
if (pcnt <= 0.0)
return -1;
start_pcnt = pcnt * (i - 1);
end_pcnt = pcnt * i;
return set_percent_time(ptime, start_pcnt, end_pcnt, start, end);
}
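/*
 * Illustrative example (editor's addition): for a session with
 * start = 0 and end = 1000000, the string "10%/2" yields pcnt = 0.1 and
 * i = 2, so start_pcnt = 0.1 and end_pcnt = 0.2, and set_percent_time()
 * selects the window [100000, 199999] (the end is pulled back by one
 * unless it coincides with the overall end).
 */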
static int percent_dash_split(char *str, struct perf_time_interval *ptime,
u64 start, u64 end)
{
char *start_str = NULL, *end_str;
double start_pcnt, end_pcnt;
int ret;
/*
* Example: 0%-10%
*/
ret = split_start_end(&start_str, &end_str, str, '-');
if (ret || !start_str)
return ret;
if ((parse_percent(&start_pcnt, start_str) != 0) ||
(parse_percent(&end_pcnt, end_str) != 0)) {
free(start_str);
return -1;
}
free(start_str);
return set_percent_time(ptime, start_pcnt, end_pcnt, start, end);
}
typedef int (*time_percent_split)(char *, struct perf_time_interval *,
				  u64 start, u64 end);
static int percent_comma_split(struct perf_time_interval *ptime_buf, int num,
const char *ostr, u64 start, u64 end,
			       time_percent_split func)
{
char *str, *p1, *p2;
int len, ret, i = 0;
str = strdup(ostr);
if (str == NULL)
return -ENOMEM;
len = strlen(str);
p1 = str;
while (p1 < str + len) {
if (i >= num) {
free(str);
return -1;
}
p2 = strchr(p1, ',');
if (p2)
*p2 = '\0';
ret = (func)(p1, &ptime_buf[i], start, end);
if (ret < 0) {
free(str);
return -1;
}
pr_debug("start time %d: %" PRIu64 ", ", i, ptime_buf[i].start);
pr_debug("end time %d: %" PRIu64 "\n", i, ptime_buf[i].end);
i++;
if (p2)
p1 = p2 + 1;
else
break;
}
free(str);
return i;
}
static int one_percent_convert(struct perf_time_interval *ptime_buf,
const char *ostr, u64 start, u64 end, char *c)
{
char *str;
int len = strlen(ostr), ret;
/*
* c points to '%'.
* '%' should be the last character
*/
if (ostr + len - 1 != c)
return -1;
/*
* Construct a string like "xx%/1"
*/
str = malloc(len + 3);
if (str == NULL)
return -ENOMEM;
memcpy(str, ostr, len);
strcpy(str + len, "/1");
ret = percent_slash_split(str, ptime_buf, start, end);
if (ret == 0)
ret = 1;
free(str);
return ret;
}
int perf_time__percent_parse_str(struct perf_time_interval *ptime_buf, int num,
const char *ostr, u64 start, u64 end)
{
char *c;
/*
* ostr example:
* 10%/2,10%/3: select the second 10% slice and the third 10% slice
* 0%-10%,30%-40%: multiple time range
* 50%: just one percent
*/
memset(ptime_buf, 0, sizeof(*ptime_buf) * num);
c = strchr(ostr, '/');
if (c) {
return percent_comma_split(ptime_buf, num, ostr, start,
end, percent_slash_split);
}
c = strchr(ostr, '-');
if (c) {
return percent_comma_split(ptime_buf, num, ostr, start,
end, percent_dash_split);
}
c = strchr(ostr, '%');
if (c)
return one_percent_convert(ptime_buf, ostr, start, end, c);
return -1;
}
struct perf_time_interval *perf_time__range_alloc(const char *ostr, int *size)
{
const char *p1, *p2;
int i = 1;
struct perf_time_interval *ptime;
/*
* At least allocate one time range.
*/
if (!ostr)
goto alloc;
p1 = ostr;
while (p1 < ostr + strlen(ostr)) {
p2 = strchr(p1, ',');
if (!p2)
break;
p1 = p2 + 1;
i++;
}
alloc:
*size = i;
ptime = calloc(i, sizeof(*ptime));
return ptime;
}
bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp)
{
/* if time is not set don't drop sample */
if (timestamp == 0)
return false;
/* otherwise compare sample time to time window */
if ((ptime->start && timestamp < ptime->start) ||
(ptime->end && timestamp > ptime->end)) {
return true;
}
return false;
}
bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
int num, u64 timestamp)
{
struct perf_time_interval *ptime;
int i;
if ((!ptime_buf) || (timestamp == 0) || (num == 0))
return false;
if (num == 1)
return perf_time__skip_sample(&ptime_buf[0], timestamp);
/*
* start/end of multiple time ranges must be valid.
*/
for (i = 0; i < num; i++) {
ptime = &ptime_buf[i];
if (timestamp >= ptime->start &&
(timestamp <= ptime->end || !ptime->end)) {
return false;
}
}
return true;
}
int perf_time__parse_for_ranges_reltime(const char *time_str,
struct perf_session *session,
struct perf_time_interval **ranges,
int *range_size, int *range_num,
bool reltime)
{
bool has_percent = strchr(time_str, '%');
struct perf_time_interval *ptime_range;
int size, num, ret = -EINVAL;
ptime_range = perf_time__range_alloc(time_str, &size);
if (!ptime_range)
return -ENOMEM;
if (has_percent || reltime) {
if (session->evlist->first_sample_time == 0 &&
session->evlist->last_sample_time == 0) {
pr_err("HINT: no first/last sample time found in perf data.\n"
"Please use latest perf binary to execute 'perf record'\n"
"(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
goto error;
}
}
if (has_percent) {
num = perf_time__percent_parse_str(
ptime_range, size,
time_str,
session->evlist->first_sample_time,
session->evlist->last_sample_time);
} else {
num = perf_time__parse_strs(ptime_range, time_str, size);
}
if (num < 0)
goto error_invalid;
if (reltime) {
int i;
for (i = 0; i < num; i++) {
ptime_range[i].start += session->evlist->first_sample_time;
ptime_range[i].end += session->evlist->first_sample_time;
}
}
*range_size = size;
*range_num = num;
*ranges = ptime_range;
return 0;
error_invalid:
pr_err("Invalid time string\n");
error:
free(ptime_range);
return ret;
}
int perf_time__parse_for_ranges(const char *time_str,
struct perf_session *session,
struct perf_time_interval **ranges,
int *range_size, int *range_num)
{
return perf_time__parse_for_ranges_reltime(time_str, session, ranges,
range_size, range_num, false);
}
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
{
u64 sec = timestamp / NSEC_PER_SEC;
u64 usec = (timestamp % NSEC_PER_SEC) / NSEC_PER_USEC;
return scnprintf(buf, sz, "%"PRIu64".%06"PRIu64, sec, usec);
}
int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz)
{
u64 sec = timestamp / NSEC_PER_SEC,
nsec = timestamp % NSEC_PER_SEC;
return scnprintf(buf, sz, "%" PRIu64 ".%09" PRIu64, sec, nsec);
}
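/*
 * Illustrative example (editor's addition): a timestamp of 1234567890 ns
 * prints as "1.234567" with timestamp__scnprintf_usec() and as
 * "1.234567890" with timestamp__scnprintf_nsec().
 */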
int fetch_current_timestamp(char *buf, size_t sz)
{
struct timeval tv;
struct tm tm;
char dt[32];
if (gettimeofday(&tv, NULL) || !localtime_r(&tv.tv_sec, &tm))
return -1;
if (!strftime(dt, sizeof(dt), "%Y%m%d%H%M%S", &tm))
return -1;
scnprintf(buf, sz, "%s%02u", dt, (unsigned)tv.tv_usec / 10000);
return 0;
}
| linux-master | tools/perf/util/time-utils.c |
// SPDX-License-Identifier: GPL-2.0
#include "string2.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <stdlib.h>
#include <linux/ctype.h>
const char *graph_dotted_line =
"---------------------------------------------------------------------"
"---------------------------------------------------------------------"
"---------------------------------------------------------------------";
const char *dots =
"....................................................................."
"....................................................................."
".....................................................................";
/*
* perf_atoll()
* Parse (\d+)(b|B|kb|KB|mb|MB|gb|GB|tb|TB) (e.g. "256MB")
* and return its numeric value
*/
s64 perf_atoll(const char *str)
{
s64 length;
char *p;
char c;
if (!isdigit(str[0]))
goto out_err;
length = strtoll(str, &p, 10);
switch (c = *p++) {
case 'b': case 'B':
if (*p)
goto out_err;
fallthrough;
case '\0':
return length;
default:
goto out_err;
	/* two-letter suffixes */
case 'k': case 'K':
length <<= 10;
break;
case 'm': case 'M':
length <<= 20;
break;
case 'g': case 'G':
length <<= 30;
break;
case 't': case 'T':
length <<= 40;
break;
}
/* we want the cases to match */
if (islower(c)) {
if (strcmp(p, "b") != 0)
goto out_err;
} else {
if (strcmp(p, "B") != 0)
goto out_err;
}
return length;
out_err:
return -1;
}
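/*
 * Illustrative examples (editor's addition): perf_atoll("256MB") returns
 * 256 << 20, perf_atoll("4kb") returns 4096 and perf_atoll("7") returns 7,
 * while a mixed-case suffix such as "10Mb" is rejected and returns -1.
 */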
/* Character class matching */
static bool __match_charclass(const char *pat, char c, const char **npat)
{
bool complement = false, ret = true;
if (*pat == '!') {
complement = true;
pat++;
}
if (*pat++ == c) /* First character is special */
goto end;
while (*pat && *pat != ']') { /* Matching */
if (*pat == '-' && *(pat + 1) != ']') { /* Range */
if (*(pat - 1) <= c && c <= *(pat + 1))
goto end;
if (*(pat - 1) > *(pat + 1))
goto error;
pat += 2;
} else if (*pat++ == c)
goto end;
}
if (!*pat)
goto error;
ret = false;
end:
while (*pat && *pat != ']') /* Searching closing */
pat++;
if (!*pat)
goto error;
*npat = pat + 1;
return complement ? !ret : ret;
error:
return false;
}
/* Glob/lazy pattern matching */
static bool __match_glob(const char *str, const char *pat, bool ignore_space,
bool case_ins)
{
while (*str && *pat && *pat != '*') {
if (ignore_space) {
/* Ignore spaces for lazy matching */
if (isspace(*str)) {
str++;
continue;
}
if (isspace(*pat)) {
pat++;
continue;
}
}
if (*pat == '?') { /* Matches any single character */
str++;
pat++;
continue;
} else if (*pat == '[') /* Character classes/Ranges */
if (__match_charclass(pat + 1, *str, &pat)) {
str++;
continue;
} else
return false;
else if (*pat == '\\') /* Escaped char match as normal char */
pat++;
if (case_ins) {
if (tolower(*str) != tolower(*pat))
return false;
} else if (*str != *pat)
return false;
str++;
pat++;
}
/* Check wild card */
if (*pat == '*') {
while (*pat == '*')
pat++;
if (!*pat) /* Tail wild card matches all */
return true;
while (*str)
if (__match_glob(str++, pat, ignore_space, case_ins))
return true;
}
return !*str && !*pat;
}
/**
* strglobmatch - glob expression pattern matching
* @str: the target string to match
* @pat: the pattern string to match
*
 * This returns true if @str matches @pat. @pat can include wildcards
 * ('*','?') and character classes ([CHARS]; complementation and ranges are
 * also supported). Also, this supports the escape character ('\') to use
 * special characters as normal characters.
*
* Note: if @pat syntax is broken, this always returns false.
*/
bool strglobmatch(const char *str, const char *pat)
{
return __match_glob(str, pat, false, false);
}
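/*
 * For example, strglobmatch("foo.c", "*.c") and
 * strglobmatch("foo.c", "[fg]oo.?") are true, while
 * strglobmatch("Foo.c", "foo*") is false (see strglobmatch_nocase()).
 */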
bool strglobmatch_nocase(const char *str, const char *pat)
{
return __match_glob(str, pat, false, true);
}
/**
* strlazymatch - matching pattern strings lazily with glob pattern
* @str: the target string to match
* @pat: the pattern string to match
*
* This is similar to strglobmatch, except this ignores spaces in
* the target string.
*/
bool strlazymatch(const char *str, const char *pat)
{
return __match_glob(str, pat, true, false);
}
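/*
 * For example, strlazymatch("foo bar", "foobar") is true, whereas
 * strglobmatch() on the same arguments is false.
 */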
/**
* strtailcmp - Compare the tail of two strings
* @s1: 1st string to be compared
* @s2: 2nd string to be compared
*
 * Return 0 if the whole of either string is the same as the other's tail part.
*/
int strtailcmp(const char *s1, const char *s2)
{
int i1 = strlen(s1);
int i2 = strlen(s2);
while (--i1 >= 0 && --i2 >= 0) {
if (s1[i1] != s2[i2])
return s1[i1] - s2[i2];
}
return 0;
}
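/*
 * For example, strtailcmp("util/string.c", "string.c") == 0, while
 * strtailcmp("foo.c", "bar.c") != 0.
 */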
char *asprintf_expr_inout_ints(const char *var, bool in, size_t nints, int *ints)
{
/*
* FIXME: replace this with an expression using log10() when we
* find a suitable implementation, maybe the one in the dvb drivers...
*
* "%s == %d || " = log10(MAXINT) * 2 + 8 chars for the operators
*/
size_t size = nints * 28 + 1; /* \0 */
size_t i, printed = 0;
char *expr = malloc(size);
if (expr) {
const char *or_and = "||", *eq_neq = "==";
char *e = expr;
if (!in) {
or_and = "&&";
eq_neq = "!=";
}
for (i = 0; i < nints; ++i) {
if (printed == size)
goto out_err_overflow;
if (i > 0)
printed += scnprintf(e + printed, size - printed, " %s ", or_and);
printed += scnprintf(e + printed, size - printed,
"%s %s %d", var, eq_neq, ints[i]);
}
}
return expr;
out_err_overflow:
free(expr);
return NULL;
}
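/*
 * For example, asprintf_expr_inout_ints("id", true, 3, (int[]){ 1, 2, 3 })
 * returns "id == 1 || id == 2 || id == 3", and with in == false it returns
 * "id != 1 && id != 2 && id != 3". The caller must free() the result.
 */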
/* Like strpbrk(), but does not break if the character is right after a backslash (escaped) */
char *strpbrk_esc(char *str, const char *stopset)
{
char *ptr;
do {
ptr = strpbrk(str, stopset);
if (ptr == str ||
(ptr == str + 1 && *(ptr - 1) != '\\'))
break;
str = ptr + 1;
} while (ptr && *(ptr - 1) == '\\' && *(ptr - 2) != '\\');
return ptr;
}
/* Like strdup(), but do not copy single (escape) backslashes */
char *strdup_esc(const char *str)
{
char *s, *d, *p, *ret = strdup(str);
if (!ret)
return NULL;
d = strchr(ret, '\\');
if (!d)
return ret;
s = d + 1;
do {
if (*s == '\0') {
*d = '\0';
break;
}
p = strchr(s + 1, '\\');
if (p) {
memmove(d, s, p - s);
d += p - s;
s = p + 1;
} else
memmove(d, s, strlen(s) + 1);
} while (p);
return ret;
}
unsigned int hex(char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
return c - 'A' + 10;
}
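/*
 * Note: hex() assumes a valid hexadecimal digit, e.g. hex('7') == 7 and
 * hex('f') == hex('F') == 15; other characters yield meaningless values.
 */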
| linux-master | tools/perf/util/string.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Post mortem Dwarf CFI based unwinding on top of regs and stack dumps.
*
 * Much of this code has been borrowed from, or heavily inspired by, parts of
* the libunwind 0.99 code which are (amongst other contributors I may have
* forgotten):
*
* Copyright (C) 2002-2007 Hewlett-Packard Co
* Contributed by David Mosberger-Tang <[email protected]>
*
* And the bugs have been added by:
*
* Copyright (C) 2010, Frederic Weisbecker <[email protected]>
* Copyright (C) 2012, Jiri Olsa <[email protected]>
*
*/
#include <elf.h>
#include <errno.h>
#include <gelf.h>
#include <fcntl.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#ifndef REMOTE_UNWIND_LIBUNWIND
#include <libunwind.h>
#include <libunwind-ptrace.h>
#endif
#include "callchain.h"
#include "thread.h"
#include "session.h"
#include "perf_regs.h"
#include "unwind.h"
#include "map.h"
#include "symbol.h"
#include "debug.h"
#include "asm/bug.h"
#include "dso.h"
extern int
UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
unw_word_t ip,
unw_dyn_info_t *di,
unw_proc_info_t *pi,
int need_unwind_info, void *arg);
#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
extern int
UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
unw_word_t ip,
unw_word_t segbase,
const char *obj_name, unw_word_t start,
unw_word_t end);
#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
#define DW_EH_PE_FORMAT_MASK 0x0f /* format of the encoded value */
#define DW_EH_PE_APPL_MASK 0x70 /* how the value is to be applied */
/* Pointer-encoding formats: */
#define DW_EH_PE_omit 0xff
#define DW_EH_PE_ptr 0x00 /* pointer-sized unsigned value */
#define DW_EH_PE_udata4 0x03 /* unsigned 32-bit value */
#define DW_EH_PE_udata8 0x04 /* unsigned 64-bit value */
#define DW_EH_PE_sdata4 0x0b /* signed 32-bit value */
#define DW_EH_PE_sdata8 0x0c /* signed 64-bit value */
/* Pointer-encoding application: */
#define DW_EH_PE_absptr 0x00 /* absolute value */
#define DW_EH_PE_pcrel 0x10 /* rel. to addr. of encoded value */
/*
* The following are not documented by LSB v1.3, yet they are used by
* GCC, presumably they aren't documented by LSB since they aren't
* used on Linux:
*/
#define DW_EH_PE_funcrel 0x40 /* start-of-procedure-relative */
#define DW_EH_PE_aligned 0x50 /* aligned pointer */
/* Flags intentionally not handled, since they're not needed:
* #define DW_EH_PE_indirect 0x80
* #define DW_EH_PE_uleb128 0x01
* #define DW_EH_PE_udata2 0x02
* #define DW_EH_PE_sleb128 0x09
* #define DW_EH_PE_sdata2 0x0a
* #define DW_EH_PE_textrel 0x20
* #define DW_EH_PE_datarel 0x30
*/
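/*
 * An encoding byte combines one format and one application value, e.g.
 * 0x1b == DW_EH_PE_pcrel | DW_EH_PE_sdata4: a signed 32-bit value relative
 * to the address at which it is encoded (see __dw_read_encoded_value()).
 */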
struct unwind_info {
struct perf_sample *sample;
struct machine *machine;
struct thread *thread;
bool best_effort;
};
#define dw_read(ptr, type, end) ({ \
type *__p = (type *) ptr; \
type __v; \
if ((__p + 1) > (type *) end) \
return -EINVAL; \
__v = *__p++; \
ptr = (typeof(ptr)) __p; \
__v; \
})
static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val,
u8 encoding)
{
u8 *cur = *p;
*val = 0;
switch (encoding) {
case DW_EH_PE_omit:
*val = 0;
goto out;
case DW_EH_PE_ptr:
*val = dw_read(cur, unsigned long, end);
goto out;
default:
break;
}
switch (encoding & DW_EH_PE_APPL_MASK) {
case DW_EH_PE_absptr:
break;
case DW_EH_PE_pcrel:
*val = (unsigned long) cur;
break;
default:
return -EINVAL;
}
if ((encoding & 0x07) == 0x00)
encoding |= DW_EH_PE_udata4;
switch (encoding & DW_EH_PE_FORMAT_MASK) {
case DW_EH_PE_sdata4:
*val += dw_read(cur, s32, end);
break;
case DW_EH_PE_udata4:
*val += dw_read(cur, u32, end);
break;
case DW_EH_PE_sdata8:
*val += dw_read(cur, s64, end);
break;
case DW_EH_PE_udata8:
*val += dw_read(cur, u64, end);
break;
default:
return -EINVAL;
}
out:
*p = cur;
return 0;
}
#define dw_read_encoded_value(ptr, end, enc) ({ \
u64 __v; \
if (__dw_read_encoded_value(&ptr, end, &__v, enc)) { \
return -EINVAL; \
} \
__v; \
})
static int elf_section_address_and_offset(int fd, const char *name, u64 *address, u64 *offset)
{
Elf *elf;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
int ret = -1;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL)
return -1;
if (gelf_getehdr(elf, &ehdr) == NULL)
goto out_err;
if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
goto out_err;
*address = shdr.sh_addr;
*offset = shdr.sh_offset;
ret = 0;
out_err:
elf_end(elf);
return ret;
}
#ifndef NO_LIBUNWIND_DEBUG_FRAME
static u64 elf_section_offset(int fd, const char *name)
{
u64 address, offset = 0;
if (elf_section_address_and_offset(fd, name, &address, &offset))
return 0;
return offset;
}
#endif
static u64 elf_base_address(int fd)
{
Elf *elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
GElf_Phdr phdr;
u64 retval = 0;
size_t i, phdrnum = 0;
if (elf == NULL)
return 0;
(void)elf_getphdrnum(elf, &phdrnum);
/* PT_LOAD segments are sorted by p_vaddr, so the first has the minimum p_vaddr. */
for (i = 0; i < phdrnum; i++) {
if (gelf_getphdr(elf, i, &phdr) && phdr.p_type == PT_LOAD) {
retval = phdr.p_vaddr & -getpagesize();
break;
}
}
elf_end(elf);
return retval;
}
#ifndef NO_LIBUNWIND_DEBUG_FRAME
static int elf_is_exec(int fd, const char *name)
{
Elf *elf;
GElf_Ehdr ehdr;
int retval = 0;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL)
return 0;
if (gelf_getehdr(elf, &ehdr) == NULL)
goto out;
retval = (ehdr.e_type == ET_EXEC);
out:
elf_end(elf);
pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval);
return retval;
}
#endif
struct table_entry {
u32 start_ip_offset;
u32 fde_offset;
};
struct eh_frame_hdr {
unsigned char version;
unsigned char eh_frame_ptr_enc;
unsigned char fde_count_enc;
unsigned char table_enc;
/*
* The rest of the header is variable-length and consists of the
* following members:
*
* encoded_t eh_frame_ptr;
* encoded_t fde_count;
*/
/* A single encoded pointer should not be more than 8 bytes. */
u64 enc[2];
/*
* struct {
* encoded_t start_ip;
* encoded_t fde_addr;
* } binary_search_table[fde_count];
*/
char data[];
} __packed;
static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
u64 offset, u64 *table_data_offset, u64 *fde_count)
{
struct eh_frame_hdr hdr;
u8 *enc = (u8 *) &hdr.enc;
u8 *end = (u8 *) &hdr.data;
ssize_t r;
r = dso__data_read_offset(dso, machine, offset,
(u8 *) &hdr, sizeof(hdr));
if (r != sizeof(hdr))
return -EINVAL;
/* We don't need eh_frame_ptr, just skip it. */
dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc);
*fde_count = dw_read_encoded_value(enc, end, hdr.fde_count_enc);
*table_data_offset = enc - (u8 *) &hdr;
return 0;
}
static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
u64 *table_data, u64 *segbase,
u64 *fde_count)
{
struct map_rb_node *map_node;
u64 base_addr = UINT64_MAX;
int ret, fd;
if (dso->data.eh_frame_hdr_offset == 0) {
fd = dso__data_get_fd(dso, ui->machine);
if (fd < 0)
return -EINVAL;
/* Check the .eh_frame section for unwinding info */
ret = elf_section_address_and_offset(fd, ".eh_frame_hdr",
&dso->data.eh_frame_hdr_addr,
&dso->data.eh_frame_hdr_offset);
dso->data.elf_base_addr = elf_base_address(fd);
dso__data_put_fd(dso);
if (ret || dso->data.eh_frame_hdr_offset == 0)
return -EINVAL;
}
maps__for_each_entry(thread__maps(ui->thread), map_node) {
struct map *map = map_node->map;
u64 start = map__start(map);
if (map__dso(map) == dso && start < base_addr)
base_addr = start;
}
base_addr -= dso->data.elf_base_addr;
/* Address of .eh_frame_hdr */
*segbase = base_addr + dso->data.eh_frame_hdr_addr;
ret = unwind_spec_ehframe(dso, ui->machine, dso->data.eh_frame_hdr_offset,
table_data, fde_count);
if (ret)
return ret;
/* binary_search_table offset plus .eh_frame_hdr address */
*table_data += *segbase;
return 0;
}
#ifndef NO_LIBUNWIND_DEBUG_FRAME
static int read_unwind_spec_debug_frame(struct dso *dso,
struct machine *machine, u64 *offset)
{
int fd;
u64 ofs = dso->data.debug_frame_offset;
/* debug_frame can reside in:
 *  - dso
 *  - debug file pointed to by symsrc_filename
 *  - gnu_debuglink, which doesn't necessarily
 *    have to be pointed to by symsrc_filename
 */
if (ofs == 0) {
fd = dso__data_get_fd(dso, machine);
if (fd >= 0) {
ofs = elf_section_offset(fd, ".debug_frame");
dso__data_put_fd(dso);
}
if (ofs <= 0) {
fd = open(dso->symsrc_filename, O_RDONLY);
if (fd >= 0) {
ofs = elf_section_offset(fd, ".debug_frame");
close(fd);
}
}
if (ofs <= 0) {
char *debuglink = malloc(PATH_MAX);
int ret = 0;
ret = dso__read_binary_type_filename(
dso, DSO_BINARY_TYPE__DEBUGLINK,
machine->root_dir, debuglink, PATH_MAX);
if (!ret) {
fd = open(debuglink, O_RDONLY);
if (fd >= 0) {
ofs = elf_section_offset(fd,
".debug_frame");
close(fd);
}
}
if (ofs > 0) {
if (dso->symsrc_filename != NULL) {
pr_warning(
"%s: overwrite symsrc(%s,%s)\n",
__func__,
dso->symsrc_filename,
debuglink);
zfree(&dso->symsrc_filename);
}
dso->symsrc_filename = debuglink;
} else {
free(debuglink);
}
}
dso->data.debug_frame_offset = ofs;
}
*offset = ofs;
if (*offset)
return 0;
return -EINVAL;
}
#endif
static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
{
struct addr_location al;
struct map *ret;
addr_location__init(&al);
thread__find_map(ui->thread, PERF_RECORD_MISC_USER, ip, &al);
ret = map__get(al.map);
addr_location__exit(&al);
return ret;
}
static int
find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
int need_unwind_info, void *arg)
{
struct unwind_info *ui = arg;
struct map *map;
struct dso *dso;
unw_dyn_info_t di;
u64 table_data, segbase, fde_count;
int ret = -EINVAL;
map = find_map(ip, ui);
if (!map)
return -EINVAL;
dso = map__dso(map);
if (!dso) {
map__put(map);
return -EINVAL;
}
pr_debug("unwind: find_proc_info dso %s\n", dso->name);
/* Check the .eh_frame section for unwinding info */
if (!read_unwind_spec_eh_frame(dso, ui, &table_data, &segbase, &fde_count)) {
memset(&di, 0, sizeof(di));
di.format = UNW_INFO_FORMAT_REMOTE_TABLE;
di.start_ip = map__start(map);
di.end_ip = map__end(map);
di.u.rti.segbase = segbase;
di.u.rti.table_data = table_data;
di.u.rti.table_len = fde_count * sizeof(struct table_entry)
/ sizeof(unw_word_t);
ret = dwarf_search_unwind_table(as, ip, &di, pi,
need_unwind_info, arg);
}
#ifndef NO_LIBUNWIND_DEBUG_FRAME
/* Check the .debug_frame section for unwinding info */
if (ret < 0 &&
!read_unwind_spec_debug_frame(dso, ui->machine, &segbase)) {
int fd = dso__data_get_fd(dso, ui->machine);
int is_exec = elf_is_exec(fd, dso->name);
u64 start = map__start(map);
unw_word_t base = is_exec ? 0 : start;
const char *symfile;
if (fd >= 0)
dso__data_put_fd(dso);
symfile = dso->symsrc_filename ?: dso->name;
memset(&di, 0, sizeof(di));
if (dwarf_find_debug_frame(0, &di, ip, base, symfile, start, map__end(map)))
ret = dwarf_search_unwind_table(as, ip, &di, pi,
need_unwind_info, arg);
}
#endif
map__put(map);
return ret;
}
static int access_fpreg(unw_addr_space_t __maybe_unused as,
unw_regnum_t __maybe_unused num,
unw_fpreg_t __maybe_unused *val,
int __maybe_unused __write,
void __maybe_unused *arg)
{
pr_err("unwind: access_fpreg unsupported\n");
return -UNW_EINVAL;
}
static int get_dyn_info_list_addr(unw_addr_space_t __maybe_unused as,
unw_word_t __maybe_unused *dil_addr,
void __maybe_unused *arg)
{
return -UNW_ENOINFO;
}
static int resume(unw_addr_space_t __maybe_unused as,
unw_cursor_t __maybe_unused *cu,
void __maybe_unused *arg)
{
pr_err("unwind: resume unsupported\n");
return -UNW_EINVAL;
}
static int
get_proc_name(unw_addr_space_t __maybe_unused as,
unw_word_t __maybe_unused addr,
char __maybe_unused *bufp, size_t __maybe_unused buf_len,
unw_word_t __maybe_unused *offp, void __maybe_unused *arg)
{
pr_err("unwind: get_proc_name unsupported\n");
return -UNW_EINVAL;
}
static int access_dso_mem(struct unwind_info *ui, unw_word_t addr,
unw_word_t *data)
{
struct map *map;
struct dso *dso;
ssize_t size;
map = find_map(addr, ui);
if (!map) {
pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
return -1;
}
dso = map__dso(map);
if (!dso) {
map__put(map);
return -1;
}
size = dso__data_read_addr(dso, map, ui->machine,
addr, (u8 *) data, sizeof(*data));
map__put(map);
return !(size == sizeof(*data));
}
static int access_mem(unw_addr_space_t __maybe_unused as,
unw_word_t addr, unw_word_t *valp,
int __write, void *arg)
{
struct unwind_info *ui = arg;
const char *arch = perf_env__arch(ui->machine->env);
struct stack_dump *stack = &ui->sample->user_stack;
u64 start, end;
int offset;
int ret;
/* Don't support write, probably not needed. */
if (__write || !stack || !ui->sample->user_regs.regs) {
*valp = 0;
return 0;
}
ret = perf_reg_value(&start, &ui->sample->user_regs,
perf_arch_reg_sp(arch));
if (ret)
return ret;
end = start + stack->size;
/* Check overflow. */
if (addr + sizeof(unw_word_t) < addr)
return -EINVAL;
if (addr < start || addr + sizeof(unw_word_t) >= end) {
ret = access_dso_mem(ui, addr, valp);
if (ret) {
pr_debug("unwind: access_mem %p not inside range"
" 0x%" PRIx64 "-0x%" PRIx64 "\n",
(void *) (uintptr_t) addr, start, end);
*valp = 0;
return ret;
}
return 0;
}
offset = addr - start;
*valp = *(unw_word_t *)&stack->data[offset];
pr_debug("unwind: access_mem addr %p val %lx, offset %d\n",
(void *) (uintptr_t) addr, (unsigned long)*valp, offset);
return 0;
}
static int access_reg(unw_addr_space_t __maybe_unused as,
unw_regnum_t regnum, unw_word_t *valp,
int __write, void *arg)
{
struct unwind_info *ui = arg;
int id, ret;
u64 val;
/* Don't support write, I suspect we don't need it. */
if (__write) {
pr_err("unwind: access_reg w %d\n", regnum);
return 0;
}
if (!ui->sample->user_regs.regs) {
*valp = 0;
return 0;
}
id = LIBUNWIND__ARCH_REG_ID(regnum);
if (id < 0)
return -EINVAL;
ret = perf_reg_value(&val, &ui->sample->user_regs, id);
if (ret) {
if (!ui->best_effort)
pr_err("unwind: can't read reg %d\n", regnum);
return ret;
}
*valp = (unw_word_t) val;
pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp);
return 0;
}
static void put_unwind_info(unw_addr_space_t __maybe_unused as,
unw_proc_info_t *pi __maybe_unused,
void *arg __maybe_unused)
{
pr_debug("unwind: put_unwind_info called\n");
}
static int entry(u64 ip, struct thread *thread,
unwind_entry_cb_t cb, void *arg)
{
struct unwind_entry e;
struct addr_location al;
int ret;
addr_location__init(&al);
e.ms.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
e.ip = ip;
e.ms.map = al.map;
e.ms.maps = al.maps;
pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
al.sym ? al.sym->name : "''",
ip,
al.map ? map__map_ip(al.map, ip) : (u64) 0);
ret = cb(&e, arg);
addr_location__exit(&al);
return ret;
}
static void display_error(int err)
{
switch (err) {
case UNW_EINVAL:
pr_err("unwind: Only supports local.\n");
break;
case UNW_EUNSPEC:
pr_err("unwind: Unspecified error.\n");
break;
case UNW_EBADREG:
pr_err("unwind: Register unavailable.\n");
break;
default:
break;
}
}
static unw_accessors_t accessors = {
.find_proc_info = find_proc_info,
.put_unwind_info = put_unwind_info,
.get_dyn_info_list_addr = get_dyn_info_list_addr,
.access_mem = access_mem,
.access_reg = access_reg,
.access_fpreg = access_fpreg,
.resume = resume,
.get_proc_name = get_proc_name,
};
static int _unwind__prepare_access(struct maps *maps)
{
void *addr_space = unw_create_addr_space(&accessors, 0);
RC_CHK_ACCESS(maps)->addr_space = addr_space;
if (!addr_space) {
pr_err("unwind: Can't create unwind address space.\n");
return -ENOMEM;
}
unw_set_caching_policy(addr_space, UNW_CACHE_GLOBAL);
return 0;
}
static void _unwind__flush_access(struct maps *maps)
{
unw_flush_cache(maps__addr_space(maps), 0, 0);
}
static void _unwind__finish_access(struct maps *maps)
{
unw_destroy_addr_space(maps__addr_space(maps));
}
static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
void *arg, int max_stack)
{
const char *arch = perf_env__arch(ui->machine->env);
u64 val;
unw_word_t ips[max_stack];
unw_addr_space_t addr_space;
unw_cursor_t c;
int ret, i = 0;
ret = perf_reg_value(&val, &ui->sample->user_regs,
perf_arch_reg_ip(arch));
if (ret)
return ret;
ips[i++] = (unw_word_t) val;
/*
* If we need more than one entry, do the DWARF
* unwind itself.
*/
if (max_stack - 1 > 0) {
WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
addr_space = maps__addr_space(thread__maps(ui->thread));
if (addr_space == NULL)
return -1;
ret = unw_init_remote(&c, addr_space, ui);
if (ret && !ui->best_effort)
display_error(ret);
while (!ret && (unw_step(&c) > 0) && i < max_stack) {
unw_get_reg(&c, UNW_REG_IP, &ips[i]);
/*
* Decrement the IP for any non-activation frames.
* this is required to properly find the srcline
* for caller frames.
* See also the documentation for dwfl_frame_pc(),
* which this code tries to replicate.
*/
if (unw_is_signal_frame(&c) <= 0)
--ips[i];
++i;
}
max_stack = i;
}
/*
* Display what we got based on the order setup.
*/
for (i = 0; i < max_stack && !ret; i++) {
int j = i;
if (callchain_param.order == ORDER_CALLER)
j = max_stack - i - 1;
ret = ips[j] ? entry(ips[j], ui->thread, cb, arg) : 0;
}
return ret;
}
static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct thread *thread,
struct perf_sample *data, int max_stack,
bool best_effort)
{
struct unwind_info ui = {
.sample = data,
.thread = thread,
.machine = maps__machine(thread__maps(thread)),
.best_effort = best_effort
};
if (!data->user_regs.regs)
return -EINVAL;
if (max_stack <= 0)
return -EINVAL;
return get_entries(&ui, cb, arg, max_stack);
}
static struct unwind_libunwind_ops
_unwind_libunwind_ops = {
.prepare_access = _unwind__prepare_access,
.flush_access = _unwind__flush_access,
.finish_access = _unwind__finish_access,
.get_entries = _unwind__get_entries,
};
#ifndef REMOTE_UNWIND_LIBUNWIND
struct unwind_libunwind_ops *
local_unwind_libunwind_ops = &_unwind_libunwind_ops;
#endif
| linux-master | tools/perf/util/unwind-libunwind-local.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt.c: Intel Processor Trace support
* Copyright (c) 2013-2015, Intel Corporation.
*/
#include <inttypes.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"
#include "../arch/x86/include/uapi/asm/perf_regs.h"
#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
#define MAX_TIMESTAMP (~0ULL)
#define INTEL_PT_CFG_PASS_THRU BIT_ULL(0)
#define INTEL_PT_CFG_PWR_EVT_EN BIT_ULL(4)
#define INTEL_PT_CFG_BRANCH_EN BIT_ULL(13)
#define INTEL_PT_CFG_EVT_EN BIT_ULL(31)
#define INTEL_PT_CFG_TNT_DIS BIT_ULL(55)
struct range {
u64 start;
u64 end;
};
struct intel_pt {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
struct auxtrace_heap heap;
u32 auxtrace_type;
struct perf_session *session;
struct machine *machine;
struct evsel *switch_evsel;
struct thread *unknown_thread;
bool timeless_decoding;
bool sampling_mode;
bool snapshot_mode;
bool per_cpu_mmaps;
bool have_tsc;
bool data_queued;
bool est_tsc;
bool sync_switch;
bool sync_switch_not_supported;
bool mispred_all;
bool use_thread_stack;
bool callstack;
bool cap_event_trace;
bool have_guest_sideband;
unsigned int br_stack_sz;
unsigned int br_stack_sz_plus;
int have_sched_switch;
u32 pmu_type;
u64 kernel_start;
u64 switch_ip;
u64 ptss_ip;
u64 first_timestamp;
struct perf_tsc_conversion tc;
bool cap_user_time_zero;
struct itrace_synth_opts synth_opts;
bool sample_instructions;
u64 instructions_sample_type;
u64 instructions_id;
bool sample_cycles;
u64 cycles_sample_type;
u64 cycles_id;
bool sample_branches;
u32 branches_filter;
u64 branches_sample_type;
u64 branches_id;
bool sample_transactions;
u64 transactions_sample_type;
u64 transactions_id;
bool sample_ptwrites;
u64 ptwrites_sample_type;
u64 ptwrites_id;
bool sample_pwr_events;
u64 pwr_events_sample_type;
u64 mwait_id;
u64 pwre_id;
u64 exstop_id;
u64 pwrx_id;
u64 cbr_id;
u64 psb_id;
bool single_pebs;
bool sample_pebs;
struct evsel *pebs_evsel;
u64 evt_sample_type;
u64 evt_id;
u64 iflag_chg_sample_type;
u64 iflag_chg_id;
u64 tsc_bit;
u64 mtc_bit;
u64 mtc_freq_bits;
u32 tsc_ctc_ratio_n;
u32 tsc_ctc_ratio_d;
u64 cyc_bit;
u64 noretcomp_bit;
unsigned max_non_turbo_ratio;
unsigned cbr2khz;
int max_loops;
unsigned long num_events;
char *filter;
struct addr_filters filts;
struct range *time_ranges;
unsigned int range_cnt;
struct ip_callchain *chain;
struct branch_stack *br_stack;
u64 dflt_tsc_offset;
struct rb_root vmcs_info;
};
enum switch_state {
INTEL_PT_SS_NOT_TRACING,
INTEL_PT_SS_UNKNOWN,
INTEL_PT_SS_TRACING,
INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
/* applicable_counters is 64 bits */
#define INTEL_PT_MAX_PEBS 64
struct intel_pt_pebs_event {
struct evsel *evsel;
u64 id;
};
struct intel_pt_queue {
struct intel_pt *pt;
unsigned int queue_nr;
struct auxtrace_buffer *buffer;
struct auxtrace_buffer *old_buffer;
void *decoder;
const struct intel_pt_state *state;
struct ip_callchain *chain;
struct branch_stack *last_branch;
union perf_event *event_buf;
bool on_heap;
bool stop;
bool step_through_buffers;
bool use_buffer_pid_tid;
bool sync_switch;
bool sample_ipc;
pid_t pid, tid;
int cpu;
int switch_state;
pid_t next_tid;
struct thread *thread;
struct machine *guest_machine;
struct thread *guest_thread;
struct thread *unknown_guest_thread;
pid_t guest_machine_pid;
pid_t guest_pid;
pid_t guest_tid;
int vcpu;
bool exclude_kernel;
bool have_sample;
u64 time;
u64 timestamp;
u64 sel_timestamp;
bool sel_start;
unsigned int sel_idx;
u32 flags;
u16 insn_len;
u64 last_insn_cnt;
u64 ipc_insn_cnt;
u64 ipc_cyc_cnt;
u64 last_in_insn_cnt;
u64 last_in_cyc_cnt;
u64 last_cy_insn_cnt;
u64 last_cy_cyc_cnt;
u64 last_br_insn_cnt;
u64 last_br_cyc_cnt;
unsigned int cbr_seen;
char insn[INTEL_PT_INSN_BUF_SZ];
struct intel_pt_pebs_event pebs[INTEL_PT_MAX_PEBS];
};
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
unsigned char *buf, size_t len)
{
struct intel_pt_pkt packet;
size_t pos = 0;
int ret, pkt_len, i;
char desc[INTEL_PT_PKT_DESC_MAX];
const char *color = PERF_COLOR_BLUE;
enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
color_fprintf(stdout, color,
". ... Intel Processor Trace data: size %zu bytes\n",
len);
while (len) {
ret = intel_pt_get_packet(buf, len, &packet, &ctx);
if (ret > 0)
pkt_len = ret;
else
pkt_len = 1;
printf(".");
color_fprintf(stdout, color, " %08x: ", pos);
for (i = 0; i < pkt_len; i++)
color_fprintf(stdout, color, " %02x", buf[i]);
for (; i < 16; i++)
color_fprintf(stdout, color, " ");
if (ret > 0) {
ret = intel_pt_pkt_desc(&packet, desc,
INTEL_PT_PKT_DESC_MAX);
if (ret > 0)
color_fprintf(stdout, color, " %s\n", desc);
} else {
color_fprintf(stdout, color, " Bad packet!\n");
}
pos += pkt_len;
buf += pkt_len;
len -= pkt_len;
}
}
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
size_t len)
{
printf(".\n");
intel_pt_dump(pt, buf, len);
}
static void intel_pt_log_event(union perf_event *event)
{
FILE *f = intel_pt_log_fp();
if (!intel_pt_enable_logging || !f)
return;
perf_event__fprintf(event, NULL, f);
}
static void intel_pt_dump_sample(struct perf_session *session,
struct perf_sample *sample)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
printf("\n");
intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}
static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
{
struct perf_time_interval *range = pt->synth_opts.ptime_range;
int n = pt->synth_opts.range_num;
if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
return true;
if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
return false;
/* perf_time__ranges_skip_sample does not work if time is zero */
if (!tm)
tm = 1;
return !n || !perf_time__ranges_skip_sample(range, n, tm);
}
static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
u64 vmcs,
u64 dflt_tsc_offset)
{
struct rb_node **p = &rb_root->rb_node;
struct rb_node *parent = NULL;
struct intel_pt_vmcs_info *v;
while (*p) {
parent = *p;
v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);
if (v->vmcs == vmcs)
return v;
if (vmcs < v->vmcs)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
v = zalloc(sizeof(*v));
if (v) {
v->vmcs = vmcs;
v->tsc_offset = dflt_tsc_offset;
v->reliable = dflt_tsc_offset;
rb_link_node(&v->rb_node, parent, p);
rb_insert_color(&v->rb_node, rb_root);
}
return v;
}
static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
{
struct intel_pt_queue *ptq = data;
struct intel_pt *pt = ptq->pt;
if (!vmcs && !pt->dflt_tsc_offset)
return NULL;
return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
}
static void intel_pt_free_vmcs_info(struct intel_pt *pt)
{
struct intel_pt_vmcs_info *v;
struct rb_node *n;
n = rb_first(&pt->vmcs_info);
while (n) {
v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
n = rb_next(n);
rb_erase(&v->rb_node, &pt->vmcs_info);
free(v);
}
}
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
struct auxtrace_buffer *b)
{
bool consecutive = false;
void *start;
start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
pt->have_tsc, &consecutive,
pt->synth_opts.vm_time_correlation);
if (!start)
return -EINVAL;
/*
* In the case of vm_time_correlation, the overlap might contain TSC
* packets that will not be fixed, and that will then no longer work for
* overlap detection. Avoid that by zeroing out the overlap.
*/
if (pt->synth_opts.vm_time_correlation)
memset(b->data, 0, start - b->data);
b->use_size = b->data + b->size - start;
b->use_data = start;
if (b->use_size && consecutive)
b->consecutive = true;
return 0;
}
static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
struct auxtrace_buffer *buffer,
struct auxtrace_buffer *old_buffer,
struct intel_pt_buffer *b)
{
bool might_overlap;
if (!buffer->data) {
int fd = perf_data__fd(ptq->pt->session->data);
buffer->data = auxtrace_buffer__get_data(buffer, fd);
if (!buffer->data)
return -ENOMEM;
}
might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
if (might_overlap && !buffer->consecutive && old_buffer &&
intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
return -ENOMEM;
if (buffer->use_data) {
b->len = buffer->use_size;
b->buf = buffer->use_data;
} else {
b->len = buffer->size;
b->buf = buffer->data;
}
b->ref_timestamp = buffer->reference;
if (!old_buffer || (might_overlap && !buffer->consecutive)) {
b->consecutive = false;
b->trace_nr = buffer->buffer_nr + 1;
} else {
b->consecutive = true;
}
return 0;
}
/* Do not drop buffers with references - refer to intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
struct auxtrace_buffer *buffer)
{
if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
return;
auxtrace_buffer__drop_data(buffer);
}
/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
void *cb_data)
{
struct intel_pt_queue *ptq = data;
struct auxtrace_buffer *buffer = ptq->buffer;
struct auxtrace_buffer *old_buffer = ptq->old_buffer;
struct auxtrace_queue *queue;
int err = 0;
queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
while (1) {
struct intel_pt_buffer b = { .len = 0 };
buffer = auxtrace_buffer__next(queue, buffer);
if (!buffer)
break;
err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
if (err)
break;
if (b.len) {
intel_pt_lookahead_drop_buffer(ptq, old_buffer);
old_buffer = buffer;
} else {
intel_pt_lookahead_drop_buffer(ptq, buffer);
continue;
}
err = cb(&b, cb_data);
if (err)
break;
}
if (buffer != old_buffer)
intel_pt_lookahead_drop_buffer(ptq, buffer);
intel_pt_lookahead_drop_buffer(ptq, old_buffer);
return err;
}
/*
* This function assumes data is processed sequentially only.
* Must be serialized with respect to intel_pt_lookahead()
*/
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
struct intel_pt_queue *ptq = data;
struct auxtrace_buffer *buffer = ptq->buffer;
struct auxtrace_buffer *old_buffer = ptq->old_buffer;
struct auxtrace_queue *queue;
int err;
if (ptq->stop) {
b->len = 0;
return 0;
}
queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
buffer = auxtrace_buffer__next(queue, buffer);
if (!buffer) {
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
b->len = 0;
return 0;
}
ptq->buffer = buffer;
err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
if (err)
return err;
if (ptq->step_through_buffers)
ptq->stop = true;
if (b->len) {
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
ptq->old_buffer = buffer;
} else {
auxtrace_buffer__drop_data(buffer);
return intel_pt_get_trace(b, data);
}
return 0;
}
struct intel_pt_cache_entry {
struct auxtrace_cache_entry entry;
u64 insn_cnt;
u64 byte_cnt;
enum intel_pt_insn_op op;
enum intel_pt_insn_branch branch;
bool emulated_ptwrite;
int length;
int32_t rel;
char insn[INTEL_PT_INSN_BUF_SZ];
};
static int intel_pt_config_div(const char *var, const char *value, void *data)
{
int *d = data;
long val;
if (!strcmp(var, "intel-pt.cache-divisor")) {
val = strtol(value, NULL, 0);
if (val > 0 && val <= INT_MAX)
*d = val;
}
return 0;
}
static int intel_pt_cache_divisor(void)
{
static int d;
if (d)
return d;
perf_config(intel_pt_config_div, &d);
if (!d)
d = 64;
return d;
}
static unsigned int intel_pt_cache_size(struct dso *dso,
struct machine *machine)
{
off_t size;
size = dso__data_size(dso, machine);
size /= intel_pt_cache_divisor();
if (size < 1000)
return 10;
if (size > (1 << 21))
return 21;
return 32 - __builtin_clz(size);
}
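/*
 * For example, with the default divisor of 64, DSOs smaller than ~64000
 * bytes get the 10-bit minimum, a 64 KiB DSO gives 32 - clz(1024) = 11 bits,
 * and DSOs larger than 128 MiB are capped at 21 bits.
 */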
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
struct machine *machine)
{
struct auxtrace_cache *c;
unsigned int bits;
if (dso->auxtrace_cache)
return dso->auxtrace_cache;
bits = intel_pt_cache_size(dso, machine);
/* Ignoring cache creation failure */
c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);
dso->auxtrace_cache = c;
return c;
}
static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
u64 offset, u64 insn_cnt, u64 byte_cnt,
struct intel_pt_insn *intel_pt_insn)
{
struct auxtrace_cache *c = intel_pt_cache(dso, machine);
struct intel_pt_cache_entry *e;
int err;
if (!c)
return -ENOMEM;
e = auxtrace_cache__alloc_entry(c);
if (!e)
return -ENOMEM;
e->insn_cnt = insn_cnt;
e->byte_cnt = byte_cnt;
e->op = intel_pt_insn->op;
e->branch = intel_pt_insn->branch;
e->emulated_ptwrite = intel_pt_insn->emulated_ptwrite;
e->length = intel_pt_insn->length;
e->rel = intel_pt_insn->rel;
memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
err = auxtrace_cache__add(c, offset, &e->entry);
if (err)
auxtrace_cache__free_entry(c, e);
return err;
}
static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
struct auxtrace_cache *c = intel_pt_cache(dso, machine);
if (!c)
return NULL;
return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}
static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
u64 offset)
{
struct auxtrace_cache *c = intel_pt_cache(dso, machine);
if (!c)
return;
auxtrace_cache__remove(dso->auxtrace_cache, offset);
}
static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
{
/* Assumes 64-bit kernel */
return ip & (1ULL << 63);
}
static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
{
if (nr) {
return intel_pt_guest_kernel_ip(ip) ?
PERF_RECORD_MISC_GUEST_KERNEL :
PERF_RECORD_MISC_GUEST_USER;
}
return ip >= ptq->pt->kernel_start ?
PERF_RECORD_MISC_KERNEL :
PERF_RECORD_MISC_USER;
}
static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
{
/* No support for non-zero CS base */
if (from_ip)
return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
}
static int intel_pt_get_guest(struct intel_pt_queue *ptq)
{
struct machines *machines = &ptq->pt->session->machines;
struct machine *machine;
pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;
if (ptq->guest_machine && pid == ptq->guest_machine->pid)
return 0;
ptq->guest_machine = NULL;
thread__zput(ptq->unknown_guest_thread);
if (symbol_conf.guest_code) {
thread__zput(ptq->guest_thread);
ptq->guest_thread = machines__findnew_guest_code(machines, pid);
}
machine = machines__find_guest(machines, pid);
if (!machine)
return -1;
ptq->unknown_guest_thread = machine__idle_thread(machine);
if (!ptq->unknown_guest_thread)
return -1;
ptq->guest_machine = machine;
return 0;
}
static inline bool intel_pt_jmp_16(struct intel_pt_insn *intel_pt_insn)
{
return intel_pt_insn->rel == 16 && intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL;
}
#define PTWRITE_MAGIC "\x0f\x0bperf,ptwrite "
#define PTWRITE_MAGIC_LEN 16
static bool intel_pt_emulated_ptwrite(struct dso *dso, struct machine *machine, u64 offset)
{
unsigned char buf[PTWRITE_MAGIC_LEN];
ssize_t len;
len = dso__data_read_offset(dso, machine, offset, buf, PTWRITE_MAGIC_LEN);
if (len == PTWRITE_MAGIC_LEN && !memcmp(buf, PTWRITE_MAGIC, PTWRITE_MAGIC_LEN)) {
intel_pt_log("Emulated ptwrite signature found\n");
return true;
}
intel_pt_log("Emulated ptwrite signature not found\n");
return false;
}
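/*
 * The pattern recognized here is an unconditional jump 16 bytes forward
 * (see intel_pt_jmp_16()) immediately followed by the 16-byte magic above
 * (a ud2 opcode plus the "perf,ptwrite " marker), so the magic bytes are
 * jumped over at run time but mark the branch as an emulated ptwrite.
 */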
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
uint64_t *insn_cnt_ptr, uint64_t *ip,
uint64_t to_ip, uint64_t max_insn_cnt,
void *data)
{
struct intel_pt_queue *ptq = data;
struct machine *machine = ptq->pt->machine;
struct thread *thread;
struct addr_location al;
unsigned char buf[INTEL_PT_INSN_BUF_SZ];
ssize_t len;
int x86_64, ret = 0;
u8 cpumode;
u64 offset, start_offset, start_ip;
u64 insn_cnt = 0;
bool one_map = true;
bool nr;
addr_location__init(&al);
intel_pt_insn->length = 0;
if (to_ip && *ip == to_ip)
goto out_no_cache;
nr = ptq->state->to_nr;
cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);
if (nr) {
if (ptq->pt->have_guest_sideband) {
if (!ptq->guest_machine || ptq->guest_machine_pid != ptq->pid) {
intel_pt_log("ERROR: guest sideband but no guest machine\n");
ret = -EINVAL;
goto out_ret;
}
} else if ((!symbol_conf.guest_code && cpumode != PERF_RECORD_MISC_GUEST_KERNEL) ||
intel_pt_get_guest(ptq)) {
intel_pt_log("ERROR: no guest machine\n");
ret = -EINVAL;
goto out_ret;
}
machine = ptq->guest_machine;
thread = ptq->guest_thread;
if (!thread) {
if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL) {
intel_pt_log("ERROR: no guest thread\n");
ret = -EINVAL;
goto out_ret;
}
thread = ptq->unknown_guest_thread;
}
} else {
thread = ptq->thread;
if (!thread) {
if (cpumode != PERF_RECORD_MISC_KERNEL) {
intel_pt_log("ERROR: no thread\n");
ret = -EINVAL;
goto out_ret;
}
thread = ptq->pt->unknown_thread;
}
}
while (1) {
struct dso *dso;
if (!thread__find_map(thread, cpumode, *ip, &al) || !map__dso(al.map)) {
if (al.map)
intel_pt_log("ERROR: thread has no dso for %#" PRIx64 "\n", *ip);
else
intel_pt_log("ERROR: thread has no map for %#" PRIx64 "\n", *ip);
addr_location__exit(&al);
ret = -EINVAL;
goto out_ret;
}
dso = map__dso(al.map);
if (dso->data.status == DSO_DATA_STATUS_ERROR &&
dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE)) {
ret = -ENOENT;
goto out_ret;
}
offset = map__map_ip(al.map, *ip);
if (!to_ip && one_map) {
struct intel_pt_cache_entry *e;
e = intel_pt_cache_lookup(dso, machine, offset);
if (e &&
(!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
*insn_cnt_ptr = e->insn_cnt;
*ip += e->byte_cnt;
intel_pt_insn->op = e->op;
intel_pt_insn->branch = e->branch;
intel_pt_insn->emulated_ptwrite = e->emulated_ptwrite;
intel_pt_insn->length = e->length;
intel_pt_insn->rel = e->rel;
memcpy(intel_pt_insn->buf, e->insn, INTEL_PT_INSN_BUF_SZ);
intel_pt_log_insn_no_data(intel_pt_insn, *ip);
ret = 0;
goto out_ret;
}
}
start_offset = offset;
start_ip = *ip;
/* Load maps to ensure dso->is_64_bit has been updated */
map__load(al.map);
x86_64 = dso->is_64_bit;
while (1) {
len = dso__data_read_offset(dso, machine,
offset, buf,
INTEL_PT_INSN_BUF_SZ);
if (len <= 0) {
intel_pt_log("ERROR: failed to read at offset %#" PRIx64 " ",
offset);
if (intel_pt_enable_logging)
dso__fprintf(dso, intel_pt_log_fp());
ret = -EINVAL;
goto out_ret;
}
if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn)) {
ret = -EINVAL;
goto out_ret;
}
intel_pt_log_insn(intel_pt_insn, *ip);
insn_cnt += 1;
if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) {
bool eptw;
u64 offs;
if (!intel_pt_jmp_16(intel_pt_insn))
goto out;
/* Check for emulated ptwrite */
offs = offset + intel_pt_insn->length;
eptw = intel_pt_emulated_ptwrite(dso, machine, offs);
intel_pt_insn->emulated_ptwrite = eptw;
goto out;
}
if (max_insn_cnt && insn_cnt >= max_insn_cnt)
goto out_no_cache;
*ip += intel_pt_insn->length;
if (to_ip && *ip == to_ip) {
intel_pt_insn->length = 0;
goto out_no_cache;
}
if (*ip >= map__end(al.map))
break;
offset += intel_pt_insn->length;
}
one_map = false;
}
out:
*insn_cnt_ptr = insn_cnt;
if (!one_map)
goto out_no_cache;
/*
 * Didn't look up in the 'to_ip' case, so do it now to prevent duplicate
* entries.
*/
if (to_ip) {
struct intel_pt_cache_entry *e;
e = intel_pt_cache_lookup(map__dso(al.map), machine, start_offset);
if (e)
goto out_ret;
}
/* Ignore cache errors */
intel_pt_cache_add(map__dso(al.map), machine, start_offset, insn_cnt,
*ip - start_ip, intel_pt_insn);
out_ret:
addr_location__exit(&al);
return ret;
out_no_cache:
*insn_cnt_ptr = insn_cnt;
addr_location__exit(&al);
return 0;
}
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
uint64_t offset, const char *filename)
{
struct addr_filter *filt;
bool have_filter = false;
bool hit_tracestop = false;
bool hit_filter = false;
list_for_each_entry(filt, &pt->filts.head, list) {
if (filt->start)
have_filter = true;
if ((filename && !filt->filename) ||
(!filename && filt->filename) ||
(filename && strcmp(filename, filt->filename)))
continue;
if (!(offset >= filt->addr && offset < filt->addr + filt->size))
continue;
intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
ip, offset, filename ? filename : "[kernel]",
filt->start ? "filter" : "stop",
filt->addr, filt->size);
if (filt->start)
hit_filter = true;
else
hit_tracestop = true;
}
if (!hit_tracestop && !hit_filter)
intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
ip, offset, filename ? filename : "[kernel]");
return hit_tracestop || (have_filter && !hit_filter);
}
static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
struct intel_pt_queue *ptq = data;
struct thread *thread;
struct addr_location al;
u8 cpumode;
u64 offset;
int res;
if (ptq->state->to_nr) {
if (intel_pt_guest_kernel_ip(ip))
return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
/* No support for decoding guest user space */
return -EINVAL;
} else if (ip >= ptq->pt->kernel_start) {
return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
}
cpumode = PERF_RECORD_MISC_USER;
thread = ptq->thread;
if (!thread)
return -EINVAL;
addr_location__init(&al);
if (!thread__find_map(thread, cpumode, ip, &al) || !map__dso(al.map))
return -EINVAL;
offset = map__map_ip(al.map, ip);
res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, map__dso(al.map)->long_name);
addr_location__exit(&al);
return res;
}
static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
return __intel_pt_pgd_ip(ip, data) > 0;
}
static bool intel_pt_get_config(struct intel_pt *pt,
struct perf_event_attr *attr, u64 *config)
{
if (attr->type == pt->pmu_type) {
if (config)
*config = attr->config;
return true;
}
return false;
}
static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
struct evsel *evsel;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
!evsel->core.attr.exclude_kernel)
return false;
}
return true;
}
static bool intel_pt_return_compression(struct intel_pt *pt)
{
struct evsel *evsel;
u64 config;
if (!pt->noretcomp_bit)
return true;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
(config & pt->noretcomp_bit))
return false;
}
return true;
}
static bool intel_pt_branch_enable(struct intel_pt *pt)
{
struct evsel *evsel;
u64 config;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
(config & INTEL_PT_CFG_PASS_THRU) &&
!(config & INTEL_PT_CFG_BRANCH_EN))
return false;
}
return true;
}
static bool intel_pt_disabled_tnt(struct intel_pt *pt)
{
struct evsel *evsel;
u64 config;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
config & INTEL_PT_CFG_TNT_DIS)
return true;
}
return false;
}
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
struct evsel *evsel;
unsigned int shift;
u64 config;
if (!pt->mtc_freq_bits)
return 0;
for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
config >>= 1;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config))
return (config & pt->mtc_freq_bits) >> shift;
}
return 0;
}
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
struct evsel *evsel;
bool timeless_decoding = true;
u64 config;
if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
return true;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
return true;
if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
if (config & pt->tsc_bit)
timeless_decoding = false;
else
return true;
}
}
return timeless_decoding;
}
static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
struct evsel *evsel;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
!evsel->core.attr.exclude_kernel)
return true;
}
return false;
}
static bool intel_pt_have_tsc(struct intel_pt *pt)
{
struct evsel *evsel;
bool have_tsc = false;
u64 config;
if (!pt->tsc_bit)
return false;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
if (config & pt->tsc_bit)
have_tsc = true;
else
return false;
}
}
return have_tsc;
}
static bool intel_pt_have_mtc(struct intel_pt *pt)
{
struct evsel *evsel;
u64 config;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
(config & pt->mtc_bit))
return true;
}
return false;
}
static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
struct evsel *evsel;
evlist__for_each_entry(pt->session->evlist, evsel) {
if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
evsel->core.attr.aux_sample_size)
return true;
}
return false;
}
static u64 intel_pt_ctl(struct intel_pt *pt)
{
struct evsel *evsel;
u64 config;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config))
return config;
}
return 0;
}
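/*
 * Convert nanoseconds to TSC ticks: equivalent to (ns << time_shift) /
 * time_mult, but computed from quotient and remainder so the intermediate
 * shift cannot overflow for large ns values.
 */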
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
u64 quot, rem;
quot = ns / pt->tc.time_mult;
rem = ns % pt->tc.time_mult;
return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
pt->tc.time_mult;
}
static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
{
size_t sz = sizeof(struct ip_callchain);
/* Add 1 to callchain_sz for callchain context */
sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
return zalloc(sz);
}
static int intel_pt_callchain_init(struct intel_pt *pt)
{
struct evsel *evsel;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
}
pt->chain = intel_pt_alloc_chain(pt);
if (!pt->chain)
return -ENOMEM;
return 0;
}
static void intel_pt_add_callchain(struct intel_pt *pt,
struct perf_sample *sample)
{
struct thread *thread = machine__findnew_thread(pt->machine,
sample->pid,
sample->tid);
thread_stack__sample_late(thread, sample->cpu, pt->chain,
pt->synth_opts.callchain_sz + 1, sample->ip,
pt->kernel_start);
sample->callchain = pt->chain;
}
static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
{
size_t sz = sizeof(struct branch_stack);
sz += entry_cnt * sizeof(struct branch_entry);
return zalloc(sz);
}
static int intel_pt_br_stack_init(struct intel_pt *pt)
{
struct evsel *evsel;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
}
pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
if (!pt->br_stack)
return -ENOMEM;
return 0;
}
static void intel_pt_add_br_stack(struct intel_pt *pt,
struct perf_sample *sample)
{
struct thread *thread = machine__findnew_thread(pt->machine,
sample->pid,
sample->tid);
thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
pt->br_stack_sz, sample->ip,
pt->kernel_start);
sample->branch_stack = pt->br_stack;
thread__put(thread);
}
/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
unsigned int queue_nr)
{
struct intel_pt_params params = { .get_trace = 0, };
struct perf_env *env = pt->machine->env;
struct intel_pt_queue *ptq;
ptq = zalloc(sizeof(struct intel_pt_queue));
if (!ptq)
return NULL;
if (pt->synth_opts.callchain) {
ptq->chain = intel_pt_alloc_chain(pt);
if (!ptq->chain)
goto out_free;
}
if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);
ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
if (!ptq->last_branch)
goto out_free;
}
ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
if (!ptq->event_buf)
goto out_free;
ptq->pt = pt;
ptq->queue_nr = queue_nr;
ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
ptq->pid = -1;
ptq->tid = -1;
ptq->cpu = -1;
ptq->next_tid = -1;
params.get_trace = intel_pt_get_trace;
params.walk_insn = intel_pt_walk_next_insn;
params.lookahead = intel_pt_lookahead;
params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
params.data = ptq;
params.return_compression = intel_pt_return_compression(pt);
params.branch_enable = intel_pt_branch_enable(pt);
params.ctl = intel_pt_ctl(pt);
params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
params.mtc_period = intel_pt_mtc_period(pt);
params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
params.quick = pt->synth_opts.quick;
params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
params.first_timestamp = pt->first_timestamp;
params.max_loops = pt->max_loops;
/* Cannot walk code without TNT, so force 'quick' mode */
if (params.branch_enable && intel_pt_disabled_tnt(pt) && !params.quick)
params.quick = 1;
if (pt->filts.cnt > 0)
params.pgd_ip = intel_pt_pgd_ip;
if (pt->synth_opts.instructions || pt->synth_opts.cycles) {
if (pt->synth_opts.period) {
switch (pt->synth_opts.period_type) {
case PERF_ITRACE_PERIOD_INSTRUCTIONS:
params.period_type =
INTEL_PT_PERIOD_INSTRUCTIONS;
params.period = pt->synth_opts.period;
break;
case PERF_ITRACE_PERIOD_TICKS:
params.period_type = INTEL_PT_PERIOD_TICKS;
params.period = pt->synth_opts.period;
break;
case PERF_ITRACE_PERIOD_NANOSECS:
params.period_type = INTEL_PT_PERIOD_TICKS;
params.period = intel_pt_ns_to_ticks(pt,
pt->synth_opts.period);
break;
default:
break;
}
}
if (!params.period) {
params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
params.period = 1;
}
}
if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
params.flags |= INTEL_PT_FUP_WITH_NLIP;
ptq->decoder = intel_pt_decoder_new(¶ms);
if (!ptq->decoder)
goto out_free;
return ptq;
out_free:
zfree(&ptq->event_buf);
zfree(&ptq->last_branch);
zfree(&ptq->chain);
free(ptq);
return NULL;
}
static void intel_pt_free_queue(void *priv)
{
struct intel_pt_queue *ptq = priv;
if (!ptq)
return;
thread__zput(ptq->thread);
thread__zput(ptq->guest_thread);
thread__zput(ptq->unknown_guest_thread);
intel_pt_decoder_free(ptq->decoder);
zfree(&ptq->event_buf);
zfree(&ptq->last_branch);
zfree(&ptq->chain);
free(ptq);
}
static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
{
unsigned int i;
pt->first_timestamp = timestamp;
for (i = 0; i < pt->queues.nr_queues; i++) {
struct auxtrace_queue *queue = &pt->queues.queue_array[i];
struct intel_pt_queue *ptq = queue->priv;
if (ptq && ptq->decoder)
intel_pt_set_first_timestamp(ptq->decoder, timestamp);
}
}
static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
{
struct machines *machines = &ptq->pt->session->machines;
struct machine *machine;
pid_t machine_pid = ptq->pid;
pid_t tid;
int vcpu;
if (machine_pid <= 0)
return 0; /* Not a guest machine */
machine = machines__find(machines, machine_pid);
if (!machine)
return 0; /* Not a guest machine */
if (ptq->guest_machine != machine) {
ptq->guest_machine = NULL;
thread__zput(ptq->guest_thread);
thread__zput(ptq->unknown_guest_thread);
ptq->unknown_guest_thread = machine__find_thread(machine, 0, 0);
if (!ptq->unknown_guest_thread)
return -1;
ptq->guest_machine = machine;
}
vcpu = ptq->thread ? thread__guest_cpu(ptq->thread) : -1;
if (vcpu < 0)
return -1;
tid = machine__get_current_tid(machine, vcpu);
if (ptq->guest_thread && thread__tid(ptq->guest_thread) != tid)
thread__zput(ptq->guest_thread);
if (!ptq->guest_thread) {
ptq->guest_thread = machine__find_thread(machine, -1, tid);
if (!ptq->guest_thread)
return -1;
}
ptq->guest_machine_pid = machine_pid;
ptq->guest_pid = thread__pid(ptq->guest_thread);
ptq->guest_tid = tid;
ptq->vcpu = vcpu;
return 0;
}
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
struct auxtrace_queue *queue)
{
struct intel_pt_queue *ptq = queue->priv;
if (queue->tid == -1 || pt->have_sched_switch) {
ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
if (ptq->tid == -1)
ptq->pid = -1;
thread__zput(ptq->thread);
}
if (!ptq->thread && ptq->tid != -1)
ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
if (ptq->thread) {
ptq->pid = thread__pid(ptq->thread);
if (queue->cpu == -1)
ptq->cpu = thread__cpu(ptq->thread);
}
if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) {
ptq->guest_machine_pid = 0;
ptq->guest_pid = -1;
ptq->guest_tid = -1;
ptq->vcpu = -1;
}
}
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
ptq->insn_len = 0;
if (ptq->state->flags & INTEL_PT_ABORT_TX) {
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
} else if (ptq->state->flags & INTEL_PT_ASYNC) {
if (!ptq->state->to_ip)
ptq->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_TRACE_END;
else if (ptq->state->from_nr && !ptq->state->to_nr)
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
PERF_IP_FLAG_VMEXIT;
else
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT;
} else {
if (ptq->state->from_ip)
ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
else
ptq->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_TRACE_BEGIN;
if (ptq->state->flags & INTEL_PT_IN_TX)
ptq->flags |= PERF_IP_FLAG_IN_TX;
ptq->insn_len = ptq->state->insn_len;
memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
}
if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
if (ptq->state->type & INTEL_PT_TRACE_END)
ptq->flags |= PERF_IP_FLAG_TRACE_END;
if (pt->cap_event_trace) {
if (ptq->state->type & INTEL_PT_IFLAG_CHG) {
if (!ptq->state->from_iflag)
ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
if (ptq->state->from_iflag != ptq->state->to_iflag)
ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE;
} else if (!ptq->state->to_iflag) {
ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
}
}
}
static void intel_pt_setup_time_range(struct intel_pt *pt,
struct intel_pt_queue *ptq)
{
if (!pt->range_cnt)
return;
ptq->sel_timestamp = pt->time_ranges[0].start;
ptq->sel_idx = 0;
if (ptq->sel_timestamp) {
ptq->sel_start = true;
} else {
ptq->sel_timestamp = pt->time_ranges[0].end;
ptq->sel_start = false;
}
}
static int intel_pt_setup_queue(struct intel_pt *pt,
struct auxtrace_queue *queue,
unsigned int queue_nr)
{
struct intel_pt_queue *ptq = queue->priv;
if (list_empty(&queue->head))
return 0;
if (!ptq) {
ptq = intel_pt_alloc_queue(pt, queue_nr);
if (!ptq)
return -ENOMEM;
queue->priv = ptq;
if (queue->cpu != -1)
ptq->cpu = queue->cpu;
ptq->tid = queue->tid;
ptq->cbr_seen = UINT_MAX;
if (pt->sampling_mode && !pt->snapshot_mode &&
pt->timeless_decoding)
ptq->step_through_buffers = true;
ptq->sync_switch = pt->sync_switch;
intel_pt_setup_time_range(pt, ptq);
}
if (!ptq->on_heap &&
(!ptq->sync_switch ||
ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
const struct intel_pt_state *state;
int ret;
if (pt->timeless_decoding)
return 0;
intel_pt_log("queue %u getting timestamp\n", queue_nr);
intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
queue_nr, ptq->cpu, ptq->pid, ptq->tid);
if (ptq->sel_start && ptq->sel_timestamp) {
ret = intel_pt_fast_forward(ptq->decoder,
ptq->sel_timestamp);
if (ret)
return ret;
}
while (1) {
state = intel_pt_decode(ptq->decoder);
if (state->err) {
if (state->err == INTEL_PT_ERR_NODATA) {
intel_pt_log("queue %u has no timestamp\n",
queue_nr);
return 0;
}
continue;
}
if (state->timestamp)
break;
}
ptq->timestamp = state->timestamp;
intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
queue_nr, ptq->timestamp);
ptq->state = state;
ptq->have_sample = true;
if (ptq->sel_start && ptq->sel_timestamp &&
ptq->timestamp < ptq->sel_timestamp)
ptq->have_sample = false;
intel_pt_sample_flags(ptq);
ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
if (ret)
return ret;
ptq->on_heap = true;
}
return 0;
}
static int intel_pt_setup_queues(struct intel_pt *pt)
{
unsigned int i;
int ret;
for (i = 0; i < pt->queues.nr_queues; i++) {
ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
if (ret)
return ret;
}
return 0;
}
static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
return pt->synth_opts.initial_skip &&
pt->num_events++ < pt->synth_opts.initial_skip;
}
/*
* Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
* Also ensure CBR is first non-skipped event by allowing for 4 more samples
* from this decoder state.
*/
static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
{
return pt->synth_opts.initial_skip &&
pt->num_events + 4 < pt->synth_opts.initial_skip;
}
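/*
 * Fill in the sample fields common to all synthesized samples: header,
 * pid/tid (host or guest), cpu and instruction bytes.
 */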
static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.size = sizeof(struct perf_event_header);
sample->pid = ptq->pid;
sample->tid = ptq->tid;
if (ptq->pt->have_guest_sideband) {
if ((ptq->state->from_ip && ptq->state->from_nr) ||
(ptq->state->to_ip && ptq->state->to_nr)) {
sample->pid = ptq->guest_pid;
sample->tid = ptq->guest_tid;
sample->machine_pid = ptq->guest_machine_pid;
sample->vcpu = ptq->vcpu;
}
}
sample->cpu = ptq->cpu;
sample->insn_len = ptq->insn_len;
memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
}
static void intel_pt_prep_b_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
intel_pt_prep_a_sample(ptq, event, sample);
if (!pt->timeless_decoding)
sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample->ip = ptq->state->from_ip;
sample->addr = ptq->state->to_ip;
sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
sample->period = 1;
sample->flags = ptq->flags;
event->sample.header.misc = sample->cpumode;
}
static int intel_pt_inject_event(union perf_event *event,
struct perf_sample *sample, u64 type)
{
event->header.size = perf_event__sample_event_size(sample, type, 0);
return perf_event__synthesize_sample(event, type, 0, sample);
}
static inline int intel_pt_opt_inject(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample, u64 type)
{
if (!pt->synth_opts.inject)
return 0;
return intel_pt_inject_event(event, sample, type);
}
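/*
 * Optionally pack the sample into the event itself (inject mode) and then
 * deliver the synthesized event to the session.
 */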
static int intel_pt_deliver_synth_event(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample, u64 type)
{
int ret;
ret = intel_pt_opt_inject(pt, event, sample, type);
if (ret)
return ret;
ret = perf_session__deliver_synth_event(pt->session, event, sample);
if (ret)
pr_err("Intel PT: failed to deliver event, error %d\n", ret);
return ret;
}
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct dummy_branch_stack {
u64 nr;
u64 hw_idx;
struct branch_entry entries;
} dummy_bs;
if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
return 0;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_b_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->branches_id;
sample.stream_id = ptq->pt->branches_id;
/*
* perf report cannot handle events without a branch stack when using
* SORT_MODE__BRANCH so make a dummy one.
*/
if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
dummy_bs = (struct dummy_branch_stack){
.nr = 1,
.hw_idx = -1ULL,
.entries = {
.from = sample.ip,
.to = sample.addr,
},
};
sample.branch_stack = (struct branch_stack *)&dummy_bs;
}
if (ptq->sample_ipc)
sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
if (sample.cyc_cnt) {
sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
}
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->branches_sample_type);
}
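/*
 * Prepare a sample and add a synthesized callchain and/or branch stack from
 * the thread stack if those options are selected.
 */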
static void intel_pt_prep_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
intel_pt_prep_b_sample(pt, ptq, event, sample);
if (pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
pt->synth_opts.callchain_sz + 1,
sample->ip, pt->kernel_start);
sample->callchain = ptq->chain;
}
if (pt->synth_opts.last_branch) {
thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
pt->br_stack_sz);
sample->branch_stack = ptq->last_branch;
}
}
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->instructions_id;
sample.stream_id = ptq->pt->instructions_id;
if (pt->synth_opts.quick)
sample.period = 1;
else
sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
if (ptq->sample_ipc)
sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
if (sample.cyc_cnt) {
sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
}
ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->instructions_sample_type);
}
static int intel_pt_synth_cycle_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
u64 period = 0;
if (ptq->sample_ipc)
period = ptq->ipc_cyc_cnt - ptq->last_cy_cyc_cnt;
if (!period || intel_pt_skip_event(pt))
return 0;
intel_pt_prep_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->cycles_id;
sample.stream_id = ptq->pt->cycles_id;
sample.period = period;
sample.cyc_cnt = period;
sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_cy_insn_cnt;
ptq->last_cy_insn_cnt = ptq->ipc_insn_cnt;
ptq->last_cy_cyc_cnt = ptq->ipc_cyc_cnt;
return intel_pt_deliver_synth_event(pt, event, &sample, pt->cycles_sample_type);
}
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->transactions_id;
sample.stream_id = ptq->pt->transactions_id;
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->transactions_sample_type);
}
static void intel_pt_prep_p_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
intel_pt_prep_sample(pt, ptq, event, sample);
/*
* Zero IP is used to mean "trace start" but that is not the case for
* power or PTWRITE events with no IP, so clear the flags.
*/
if (!sample->ip)
sample->flags = 0;
}
static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_ptwrite raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->ptwrites_id;
sample.stream_id = ptq->pt->ptwrites_id;
raw.flags = 0;
raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
raw.payload = cpu_to_le64(ptq->state->ptw_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->ptwrites_sample_type);
}
static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_cbr raw;
u32 flags;
if (intel_pt_skip_cbr_event(pt))
return 0;
ptq->cbr_seen = ptq->state->cbr;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->cbr_id;
sample.stream_id = ptq->pt->cbr_id;
flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
raw.flags = cpu_to_le32(flags);
raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
raw.reserved3 = 0;
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_psb raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->psb_id;
sample.stream_id = ptq->pt->psb_id;
sample.flags = 0;
raw.reserved = 0;
raw.offset = ptq->state->psb_offset;
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_mwait raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->mwait_id;
sample.stream_id = ptq->pt->mwait_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->mwait_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_pwre raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->pwre_id;
sample.stream_id = ptq->pt->pwre_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->pwre_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_exstop raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->exstop_id;
sample.stream_id = ptq->pt->exstop_id;
raw.flags = 0;
raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_pwrx raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->pwrx_id;
sample.stream_id = ptq->pt->pwrx_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
/*
 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer to
* intel_pt_add_gp_regs().
*/
static const int pebs_gp_regs[] = {
[PERF_REG_X86_FLAGS] = 1,
[PERF_REG_X86_IP] = 2,
[PERF_REG_X86_AX] = 3,
[PERF_REG_X86_CX] = 4,
[PERF_REG_X86_DX] = 5,
[PERF_REG_X86_BX] = 6,
[PERF_REG_X86_SP] = 7,
[PERF_REG_X86_BP] = 8,
[PERF_REG_X86_SI] = 9,
[PERF_REG_X86_DI] = 10,
[PERF_REG_X86_R8] = 11,
[PERF_REG_X86_R9] = 12,
[PERF_REG_X86_R10] = 13,
[PERF_REG_X86_R11] = 14,
[PERF_REG_X86_R12] = 15,
[PERF_REG_X86_R13] = 16,
[PERF_REG_X86_R14] = 17,
[PERF_REG_X86_R15] = 18,
};
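/*
 * Copy the general purpose registers that were both requested (regs_mask) and
 * provided by the PEBS block into the sample's interrupt regs array.
 */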
static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
const struct intel_pt_blk_items *items,
u64 regs_mask)
{
const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
u32 bit;
int i;
for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
/* Get the PEBS gp_regs array index */
int n = pebs_gp_regs[i] - 1;
if (n < 0)
continue;
/*
* Add only registers that were requested (i.e. 'regs_mask') and
* that were provided (i.e. 'mask'), and update the resulting
* mask (i.e. 'intr_regs->mask') accordingly.
*/
if (mask & 1 << n && regs_mask & bit) {
intr_regs->mask |= bit;
*pos++ = gp_regs[n];
}
}
return pos;
}
#ifndef PERF_REG_X86_XMM0
#define PERF_REG_X86_XMM0 32
#endif
static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
const struct intel_pt_blk_items *items,
u64 regs_mask)
{
u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
const u64 *xmm = items->xmm;
/*
* If there are any XMM registers, then there should be all of them.
* Nevertheless, follow the logic to add only registers that were
* requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
* and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
*/
intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
for (; mask; mask >>= 1, xmm++) {
if (mask & 1)
*pos++ = *xmm;
}
}
#define LBR_INFO_MISPRED (1ULL << 63)
#define LBR_INFO_IN_TX (1ULL << 62)
#define LBR_INFO_ABORT (1ULL << 61)
#define LBR_INFO_CYCLES 0xffff
/* Refer to the kernel's intel_pmu_store_pebs_lbrs() */
static u64 intel_pt_lbr_flags(u64 info)
{
union {
struct branch_flags flags;
u64 result;
} u;
u.result = 0;
u.flags.mispred = !!(info & LBR_INFO_MISPRED);
u.flags.predicted = !(info & LBR_INFO_MISPRED);
u.flags.in_tx = !!(info & LBR_INFO_IN_TX);
u.flags.abort = !!(info & LBR_INFO_ABORT);
u.flags.cycles = info & LBR_INFO_CYCLES;
return u.result;
}
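/*
 * Convert the LBR from/to/info triplets in the PEBS block items into perf
 * branch_stack entries.
 */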
static void intel_pt_add_lbrs(struct branch_stack *br_stack,
const struct intel_pt_blk_items *items)
{
u64 *to;
int i;
br_stack->nr = 0;
to = &br_stack->entries[0].from;
for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
u32 mask = items->mask[i];
const u64 *from = items->val[i];
for (; mask; mask >>= 3, from += 3) {
if ((mask & 7) == 7) {
*to++ = from[0];
*to++ = from[1];
*to++ = intel_pt_lbr_flags(from[2]);
br_stack->nr += 1;
}
}
}
}
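/*
 * Synthesize a PEBS sample for the given evsel from the decoded block items,
 * filling in only the fields requested by the evsel's sample_type.
 */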
static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
{
const struct intel_pt_blk_items *items = &ptq->state->items;
struct perf_sample sample = { .ip = 0, };
union perf_event *event = ptq->event_buf;
struct intel_pt *pt = ptq->pt;
u64 sample_type = evsel->core.attr.sample_type;
u8 cpumode;
u64 regs[8 * sizeof(sample.intr_regs.mask)];
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_a_sample(ptq, event, &sample);
sample.id = id;
sample.stream_id = id;
if (!evsel->core.attr.freq)
sample.period = evsel->core.attr.sample_period;
/* No support for non-zero CS base */
if (items->has_ip)
sample.ip = items->ip;
else if (items->has_rip)
sample.ip = items->rip;
else
sample.ip = ptq->state->from_ip;
cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
sample.cpumode = cpumode;
if (sample_type & PERF_SAMPLE_TIME) {
u64 timestamp = 0;
if (items->has_timestamp)
timestamp = items->timestamp;
else if (!pt->timeless_decoding)
timestamp = ptq->timestamp;
if (timestamp)
sample.time = tsc_to_perf_time(timestamp, &pt->tc);
}
if (sample_type & PERF_SAMPLE_CALLCHAIN &&
pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
pt->synth_opts.callchain_sz, sample.ip,
pt->kernel_start);
sample.callchain = ptq->chain;
}
if (sample_type & PERF_SAMPLE_REGS_INTR &&
(items->mask[INTEL_PT_GP_REGS_POS] ||
items->mask[INTEL_PT_XMM_POS])) {
u64 regs_mask = evsel->core.attr.sample_regs_intr;
u64 *pos;
sample.intr_regs.abi = items->is_32_bit ?
PERF_SAMPLE_REGS_ABI_32 :
PERF_SAMPLE_REGS_ABI_64;
sample.intr_regs.regs = regs;
pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
if (items->mask[INTEL_PT_LBR_0_POS] ||
items->mask[INTEL_PT_LBR_1_POS] ||
items->mask[INTEL_PT_LBR_2_POS]) {
intel_pt_add_lbrs(ptq->last_branch, items);
} else if (pt->synth_opts.last_branch) {
thread_stack__br_sample(ptq->thread, ptq->cpu,
ptq->last_branch,
pt->br_stack_sz);
} else {
ptq->last_branch->nr = 0;
}
sample.branch_stack = ptq->last_branch;
}
if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
sample.addr = items->mem_access_address;
if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
/*
		 * Refer to the kernel's setup_pebs_adaptive_sample_data() and
* intel_hsw_weight().
*/
if (items->has_mem_access_latency) {
u64 weight = items->mem_access_latency >> 32;
/*
			 * Starting from SPR, the mem access latency field
			 * contains both cache latency [47:32] and instruction
			 * latency [15:0]. The cache latency is the same as the
			 * mem access latency on previous platforms.
			 *
			 * In practice, no memory access could last longer than
			 * 4G cycles. Use latency >> 32 to distinguish the
			 * different format of the mem access latency field.
*/
if (weight > 0) {
sample.weight = weight & 0xffff;
sample.ins_lat = items->mem_access_latency & 0xffff;
} else
sample.weight = items->mem_access_latency;
}
if (!sample.weight && items->has_tsx_aux_info) {
/* Cycles last block */
sample.weight = (u32)items->tsx_aux_info;
}
}
if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
u64 ax = items->has_rax ? items->rax : 0;
		/* Refer to the kernel's intel_hsw_transaction() */
u64 txn = (u8)(items->tsx_aux_info >> 32);
/* For RTM XABORTs also log the abort code from AX */
if (txn & PERF_TXN_TRANSACTION && ax & 1)
txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
sample.transaction = txn;
}
return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
}
static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
struct evsel *evsel = pt->pebs_evsel;
u64 id = evsel->core.id[0];
return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
}
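/*
 * Synthesize PEBS samples, matching each applicable counter (hardware ID) to
 * its event, or falling back to the single configured PEBS event.
 */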
static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
{
const struct intel_pt_blk_items *items = &ptq->state->items;
struct intel_pt_pebs_event *pe;
struct intel_pt *pt = ptq->pt;
int err = -EINVAL;
int hw_id;
if (!items->has_applicable_counters || !items->applicable_counters) {
if (!pt->single_pebs)
pr_err("PEBS-via-PT record with no applicable_counters\n");
return intel_pt_synth_single_pebs_sample(ptq);
}
for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) {
pe = &ptq->pebs[hw_id];
if (!pe->evsel) {
if (!pt->single_pebs)
pr_err("PEBS-via-PT record with no matching event, hw_id %d\n",
hw_id);
return intel_pt_synth_single_pebs_sample(ptq);
}
err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
if (err)
return err;
}
return err;
}
static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct {
struct perf_synth_intel_evt cfe;
struct perf_synth_intel_evd evd[INTEL_PT_MAX_EVDS];
} raw;
int i;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->evt_id;
sample.stream_id = ptq->pt->evt_id;
raw.cfe.type = ptq->state->cfe_type;
raw.cfe.reserved = 0;
raw.cfe.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
raw.cfe.vector = ptq->state->cfe_vector;
raw.cfe.evd_cnt = ptq->state->evd_cnt;
for (i = 0; i < ptq->state->evd_cnt; i++) {
raw.evd[i].et = 0;
raw.evd[i].evd_type = ptq->state->evd[i].type;
raw.evd[i].payload = ptq->state->evd[i].payload;
}
sample.raw_size = perf_synth__raw_size(raw) +
ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->evt_sample_type);
}
static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_iflag_chg raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->iflag_chg_id;
sample.stream_id = ptq->pt->iflag_chg_id;
raw.flags = 0;
raw.iflag = ptq->state->to_iflag;
if (ptq->state->type & INTEL_PT_BRANCH) {
raw.via_branch = 1;
raw.branch_ip = ptq->state->to_ip;
} else {
sample.addr = 0;
}
sample.flags = ptq->flags;
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->iflag_chg_sample_type);
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
pid_t pid, pid_t tid, u64 ip, u64 timestamp,
pid_t machine_pid, int vcpu)
{
bool dump_log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
bool log_on_stdout = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT;
union perf_event event;
char msg[MAX_AUXTRACE_ERROR_MSG];
int err;
if (pt->synth_opts.error_minus_flags) {
if (code == INTEL_PT_ERR_OVR &&
pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
return 0;
if (code == INTEL_PT_ERR_LOST &&
pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
return 0;
}
intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
auxtrace_synth_guest_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
code, cpu, pid, tid, ip, msg, timestamp,
machine_pid, vcpu);
if (intel_pt_enable_logging && !log_on_stdout) {
FILE *fp = intel_pt_log_fp();
if (fp)
perf_event__fprintf_auxtrace_error(&event, fp);
}
if (code != INTEL_PT_ERR_LOST && dump_log_on_error)
intel_pt_log_dump_buf();
err = perf_session__deliver_synth_event(pt->session, &event, NULL);
if (err)
pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
err);
return err;
}
static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
const struct intel_pt_state *state)
{
struct intel_pt *pt = ptq->pt;
u64 tm = ptq->timestamp;
pid_t machine_pid = 0;
pid_t pid = ptq->pid;
pid_t tid = ptq->tid;
int vcpu = -1;
tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
if (pt->have_guest_sideband && state->from_nr) {
machine_pid = ptq->guest_machine_pid;
vcpu = ptq->vcpu;
pid = ptq->guest_pid;
tid = ptq->guest_tid;
}
return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid,
state->from_ip, tm, machine_pid, vcpu);
}
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
struct auxtrace_queue *queue;
pid_t tid = ptq->next_tid;
int err;
if (tid == -1)
return 0;
intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
queue = &pt->queues.queue_array[ptq->queue_nr];
intel_pt_set_pid_tid_cpu(pt, queue);
ptq->next_tid = -1;
return err;
}
static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
struct intel_pt *pt = ptq->pt;
return ip == pt->switch_ip &&
(ptq->flags & PERF_IP_FLAG_BRANCH) &&
!(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}
#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
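/*
 * Synthesize all requested samples for the current decoder state, update the
 * thread stack for branches, and drive the sync_switch state machine.
 */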
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
const struct intel_pt_state *state = ptq->state;
struct intel_pt *pt = ptq->pt;
int err;
if (!ptq->have_sample)
return 0;
ptq->have_sample = false;
if (pt->synth_opts.approx_ipc) {
ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
ptq->ipc_cyc_cnt = ptq->state->cycles;
ptq->sample_ipc = true;
} else {
ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
}
/* Ensure guest code maps are set up */
if (symbol_conf.guest_code && (state->from_nr || state->to_nr))
intel_pt_get_guest(ptq);
/*
* Do PEBS first to allow for the possibility that the PEBS timestamp
* precedes the current timestamp.
*/
if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
err = intel_pt_synth_pebs_sample(ptq);
if (err)
return err;
}
if (pt->synth_opts.intr_events) {
if (state->type & INTEL_PT_EVT) {
err = intel_pt_synth_events_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_IFLAG_CHG) {
err = intel_pt_synth_iflag_chg_sample(ptq);
if (err)
return err;
}
}
if (pt->sample_pwr_events) {
if (state->type & INTEL_PT_PSB_EVT) {
err = intel_pt_synth_psb_sample(ptq);
if (err)
return err;
}
if (ptq->state->cbr != ptq->cbr_seen) {
err = intel_pt_synth_cbr_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_PWR_EVT) {
if (state->type & INTEL_PT_MWAIT_OP) {
err = intel_pt_synth_mwait_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_PWR_ENTRY) {
err = intel_pt_synth_pwre_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_EX_STOP) {
err = intel_pt_synth_exstop_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_PWR_EXIT) {
err = intel_pt_synth_pwrx_sample(ptq);
if (err)
return err;
}
}
}
if (state->type & INTEL_PT_INSTRUCTION) {
if (pt->sample_instructions) {
err = intel_pt_synth_instruction_sample(ptq);
if (err)
return err;
}
if (pt->sample_cycles) {
err = intel_pt_synth_cycle_sample(ptq);
if (err)
return err;
}
}
if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
err = intel_pt_synth_transaction_sample(ptq);
if (err)
return err;
}
if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
err = intel_pt_synth_ptwrite_sample(ptq);
if (err)
return err;
}
if (!(state->type & INTEL_PT_BRANCH))
return 0;
if (pt->use_thread_stack) {
thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
state->from_ip, state->to_ip, ptq->insn_len,
state->trace_nr, pt->callstack,
pt->br_stack_sz_plus,
pt->mispred_all);
} else {
thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
}
if (pt->sample_branches) {
if (state->from_nr != state->to_nr &&
state->from_ip && state->to_ip) {
struct intel_pt_state *st = (struct intel_pt_state *)state;
u64 to_ip = st->to_ip;
u64 from_ip = st->from_ip;
/*
* perf cannot handle having different machines for ip
* and addr, so create 2 branches.
*/
st->to_ip = 0;
err = intel_pt_synth_branch_sample(ptq);
if (err)
return err;
st->from_ip = 0;
st->to_ip = to_ip;
err = intel_pt_synth_branch_sample(ptq);
st->from_ip = from_ip;
} else {
err = intel_pt_synth_branch_sample(ptq);
}
if (err)
return err;
}
if (!ptq->sync_switch)
return 0;
if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
switch (ptq->switch_state) {
case INTEL_PT_SS_NOT_TRACING:
case INTEL_PT_SS_UNKNOWN:
case INTEL_PT_SS_EXPECTING_SWITCH_IP:
err = intel_pt_next_tid(pt, ptq);
if (err)
return err;
ptq->switch_state = INTEL_PT_SS_TRACING;
break;
default:
ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
return 1;
}
} else if (!state->to_ip) {
ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
ptq->switch_state = INTEL_PT_SS_UNKNOWN;
} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
state->to_ip == pt->ptss_ip &&
(ptq->flags & PERF_IP_FLAG_CALL)) {
ptq->switch_state = INTEL_PT_SS_TRACING;
}
return 0;
}
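/*
 * Find the kernel address of __switch_to (the "switch IP") and of the
 * function used to detect task scheduling out, for sync_switch support.
 */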
static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
struct machine *machine = pt->machine;
struct map *map;
struct symbol *sym, *start;
u64 ip, switch_ip = 0;
const char *ptss;
if (ptss_ip)
*ptss_ip = 0;
map = machine__kernel_map(machine);
if (!map)
return 0;
if (map__load(map))
return 0;
start = dso__first_symbol(map__dso(map));
for (sym = start; sym; sym = dso__next_symbol(sym)) {
if (sym->binding == STB_GLOBAL &&
!strcmp(sym->name, "__switch_to")) {
ip = map__unmap_ip(map, sym->start);
if (ip >= map__start(map) && ip < map__end(map)) {
switch_ip = ip;
break;
}
}
}
if (!switch_ip || !ptss_ip)
return 0;
if (pt->have_sched_switch == 1)
ptss = "perf_trace_sched_switch";
else
ptss = "__perf_event_task_sched_out";
for (sym = start; sym; sym = dso__next_symbol(sym)) {
if (!strcmp(sym->name, ptss)) {
ip = map__unmap_ip(map, sym->start);
if (ip >= map__start(map) && ip < map__end(map)) {
*ptss_ip = ip;
break;
}
}
}
return switch_ip;
}
static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
unsigned int i;
if (pt->sync_switch_not_supported)
return;
pt->sync_switch = true;
for (i = 0; i < pt->queues.nr_queues; i++) {
struct auxtrace_queue *queue = &pt->queues.queue_array[i];
struct intel_pt_queue *ptq = queue->priv;
if (ptq)
ptq->sync_switch = true;
}
}
static void intel_pt_disable_sync_switch(struct intel_pt *pt)
{
unsigned int i;
pt->sync_switch = false;
for (i = 0; i < pt->queues.nr_queues; i++) {
struct auxtrace_queue *queue = &pt->queues.queue_array[i];
struct intel_pt_queue *ptq = queue->priv;
if (ptq) {
ptq->sync_switch = false;
intel_pt_next_tid(pt, ptq);
}
}
}
/*
* To filter against time ranges, it is only necessary to look at the next start
* or end time.
*/
static bool intel_pt_next_time(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
if (ptq->sel_start) {
/* Next time is an end time */
ptq->sel_start = false;
ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
return true;
} else if (ptq->sel_idx + 1 < pt->range_cnt) {
/* Next time is a start time */
ptq->sel_start = true;
ptq->sel_idx += 1;
ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
return true;
}
/* No next time */
return false;
}
static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
{
int err;
while (1) {
if (ptq->sel_start) {
if (ptq->timestamp >= ptq->sel_timestamp) {
/* After start time, so consider next time */
intel_pt_next_time(ptq);
if (!ptq->sel_timestamp) {
/* No end time */
return 0;
}
/* Check against end time */
continue;
}
/* Before start time, so fast forward */
ptq->have_sample = false;
if (ptq->sel_timestamp > *ff_timestamp) {
if (ptq->sync_switch) {
intel_pt_next_tid(ptq->pt, ptq);
ptq->switch_state = INTEL_PT_SS_UNKNOWN;
}
*ff_timestamp = ptq->sel_timestamp;
err = intel_pt_fast_forward(ptq->decoder,
ptq->sel_timestamp);
if (err)
return err;
}
return 0;
} else if (ptq->timestamp > ptq->sel_timestamp) {
/* After end time, so consider next time */
if (!intel_pt_next_time(ptq)) {
/* No next time range, so stop decoding */
ptq->have_sample = false;
ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
return 1;
}
/* Check against next start time */
continue;
} else {
/* Before end time */
return 0;
}
}
}
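/*
 * Decode and synthesize samples for one queue until the given timestamp is
 * reached (or until the data runs out for timeless decoding), handling
 * decoder errors and time filtering on the way.
 */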
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
const struct intel_pt_state *state = ptq->state;
struct intel_pt *pt = ptq->pt;
u64 ff_timestamp = 0;
int err;
if (!pt->kernel_start) {
pt->kernel_start = machine__kernel_start(pt->machine);
if (pt->per_cpu_mmaps &&
(pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
!pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
!pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
if (pt->switch_ip) {
intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
pt->switch_ip, pt->ptss_ip);
intel_pt_enable_sync_switch(pt);
}
}
}
intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
while (1) {
err = intel_pt_sample(ptq);
if (err)
return err;
state = intel_pt_decode(ptq->decoder);
if (state->err) {
if (state->err == INTEL_PT_ERR_NODATA)
return 1;
if (ptq->sync_switch &&
state->from_ip >= pt->kernel_start) {
ptq->sync_switch = false;
intel_pt_next_tid(pt, ptq);
}
ptq->timestamp = state->est_timestamp;
if (pt->synth_opts.errors) {
err = intel_ptq_synth_error(ptq, state);
if (err)
return err;
}
continue;
}
ptq->state = state;
ptq->have_sample = true;
intel_pt_sample_flags(ptq);
/* Use estimated TSC upon return to user space */
if (pt->est_tsc &&
(state->from_ip >= pt->kernel_start || !state->from_ip) &&
state->to_ip && state->to_ip < pt->kernel_start) {
intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
state->timestamp, state->est_timestamp);
ptq->timestamp = state->est_timestamp;
/* Use estimated TSC in unknown switch state */
} else if (ptq->sync_switch &&
ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
intel_pt_is_switch_ip(ptq, state->to_ip) &&
ptq->next_tid == -1) {
intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
state->timestamp, state->est_timestamp);
ptq->timestamp = state->est_timestamp;
} else if (state->timestamp > ptq->timestamp) {
ptq->timestamp = state->timestamp;
}
if (ptq->sel_timestamp) {
err = intel_pt_time_filter(ptq, &ff_timestamp);
if (err)
return err;
}
if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
*timestamp = ptq->timestamp;
return 0;
}
}
return 0;
}
static inline int intel_pt_update_queues(struct intel_pt *pt)
{
if (pt->queues.new_data) {
pt->queues.new_data = false;
return intel_pt_setup_queues(pt);
}
return 0;
}
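/*
 * Repeatedly take the queue with the earliest timestamp from the heap and run
 * its decoder up to the next queue's timestamp or the given limit.
 */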
static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
unsigned int queue_nr;
u64 ts;
int ret;
while (1) {
struct auxtrace_queue *queue;
struct intel_pt_queue *ptq;
if (!pt->heap.heap_cnt)
return 0;
if (pt->heap.heap_array[0].ordinal >= timestamp)
return 0;
queue_nr = pt->heap.heap_array[0].queue_nr;
queue = &pt->queues.queue_array[queue_nr];
ptq = queue->priv;
intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
queue_nr, pt->heap.heap_array[0].ordinal,
timestamp);
auxtrace_heap__pop(&pt->heap);
if (pt->heap.heap_cnt) {
ts = pt->heap.heap_array[0].ordinal + 1;
if (ts > timestamp)
ts = timestamp;
} else {
ts = timestamp;
}
intel_pt_set_pid_tid_cpu(pt, queue);
ret = intel_pt_run_decoder(ptq, &ts);
if (ret < 0) {
auxtrace_heap__add(&pt->heap, queue_nr, ts);
return ret;
}
if (!ret) {
ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
if (ret < 0)
return ret;
} else {
ptq->on_heap = false;
}
}
return 0;
}
static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
u64 time_)
{
struct auxtrace_queues *queues = &pt->queues;
unsigned int i;
u64 ts = 0;
for (i = 0; i < queues->nr_queues; i++) {
struct auxtrace_queue *queue = &pt->queues.queue_array[i];
struct intel_pt_queue *ptq = queue->priv;
if (ptq && (tid == -1 || ptq->tid == tid)) {
ptq->time = time_;
intel_pt_set_pid_tid_cpu(pt, queue);
intel_pt_run_decoder(ptq, &ts);
}
}
return 0;
}
static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
struct auxtrace_queue *queue,
struct perf_sample *sample)
{
struct machine *m = ptq->pt->machine;
ptq->pid = sample->pid;
ptq->tid = sample->tid;
ptq->cpu = queue->cpu;
intel_pt_log("queue %u cpu %d pid %d tid %d\n",
ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
thread__zput(ptq->thread);
if (ptq->tid == -1)
return;
if (ptq->pid == -1) {
ptq->thread = machine__find_thread(m, -1, ptq->tid);
if (ptq->thread)
ptq->pid = thread__pid(ptq->thread);
return;
}
ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
}
static int intel_pt_process_timeless_sample(struct intel_pt *pt,
struct perf_sample *sample)
{
struct auxtrace_queue *queue;
struct intel_pt_queue *ptq;
u64 ts = 0;
queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
if (!queue)
return -EINVAL;
ptq = queue->priv;
if (!ptq)
return 0;
ptq->stop = false;
ptq->time = sample->time;
intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
intel_pt_run_decoder(ptq, &ts);
return 0;
}
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
sample->pid, sample->tid, 0, sample->time,
sample->machine_pid, sample->vcpu);
}
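/*
 * Map a CPU number to its decoder queue. Queues are normally indexed by CPU
 * number, but fall back to searching the queue array if not.
 */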
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
unsigned i, j;
if (cpu < 0 || !pt->queues.nr_queues)
return NULL;
if ((unsigned)cpu >= pt->queues.nr_queues)
i = pt->queues.nr_queues - 1;
else
i = cpu;
if (pt->queues.queue_array[i].cpu == cpu)
return pt->queues.queue_array[i].priv;
for (j = 0; i > 0; j++) {
if (pt->queues.queue_array[--i].cpu == cpu)
return pt->queues.queue_array[i].priv;
}
for (; j < pt->queues.nr_queues; j++) {
if (pt->queues.queue_array[j].cpu == cpu)
return pt->queues.queue_array[j].priv;
}
return NULL;
}
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
u64 timestamp)
{
struct intel_pt_queue *ptq;
int err;
if (!pt->sync_switch)
return 1;
ptq = intel_pt_cpu_to_ptq(pt, cpu);
if (!ptq || !ptq->sync_switch)
return 1;
switch (ptq->switch_state) {
case INTEL_PT_SS_NOT_TRACING:
break;
case INTEL_PT_SS_UNKNOWN:
case INTEL_PT_SS_TRACING:
ptq->next_tid = tid;
ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
return 0;
case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
if (!ptq->on_heap) {
ptq->timestamp = perf_time_to_tsc(timestamp,
&pt->tc);
err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
ptq->timestamp);
if (err)
return err;
ptq->on_heap = true;
}
ptq->switch_state = INTEL_PT_SS_TRACING;
break;
case INTEL_PT_SS_EXPECTING_SWITCH_IP:
intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
break;
default:
break;
}
ptq->next_tid = -1;
return 1;
}
#ifdef HAVE_LIBTRACEEVENT
static int intel_pt_process_switch(struct intel_pt *pt,
struct perf_sample *sample)
{
pid_t tid;
int cpu, ret;
struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);
if (evsel != pt->switch_evsel)
return 0;
tid = evsel__intval(evsel, sample, "next_pid");
cpu = sample->cpu;
intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
cpu, tid, sample->time, perf_time_to_tsc(sample->time,
&pt->tc));
ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
if (ret <= 0)
return ret;
return machine__set_current_tid(pt->machine, cpu, -1, tid);
}
#endif /* HAVE_LIBTRACEEVENT */
static int intel_pt_context_switch_in(struct intel_pt *pt,
struct perf_sample *sample)
{
pid_t pid = sample->pid;
pid_t tid = sample->tid;
int cpu = sample->cpu;
if (pt->sync_switch) {
struct intel_pt_queue *ptq;
ptq = intel_pt_cpu_to_ptq(pt, cpu);
if (ptq && ptq->sync_switch) {
ptq->next_tid = -1;
switch (ptq->switch_state) {
case INTEL_PT_SS_NOT_TRACING:
case INTEL_PT_SS_UNKNOWN:
case INTEL_PT_SS_TRACING:
break;
case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
case INTEL_PT_SS_EXPECTING_SWITCH_IP:
ptq->switch_state = INTEL_PT_SS_TRACING;
break;
default:
break;
}
}
}
/*
* If the current tid has not been updated yet, ensure it is now that
* a "switch in" event has occurred.
*/
if (machine__get_current_tid(pt->machine, cpu) == tid)
return 0;
return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
static int intel_pt_guest_context_switch(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample)
{
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
struct machines *machines = &pt->session->machines;
struct machine *machine = machines__find(machines, sample->machine_pid);
pt->have_guest_sideband = true;
/*
* sync_switch cannot handle guest machines at present, so just disable
* it.
*/
pt->sync_switch_not_supported = true;
if (pt->sync_switch)
intel_pt_disable_sync_switch(pt);
if (out)
return 0;
if (!machine)
return -EINVAL;
return machine__set_current_tid(machine, sample->vcpu, sample->pid, sample->tid);
}
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
struct perf_sample *sample)
{
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
pid_t pid, tid;
int cpu, ret;
if (perf_event__is_guest(event))
return intel_pt_guest_context_switch(pt, event, sample);
cpu = sample->cpu;
if (pt->have_sched_switch == 3) {
if (!out)
return intel_pt_context_switch_in(pt, sample);
if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
pr_err("Expecting CPU-wide context switch event\n");
return -EINVAL;
}
pid = event->context_switch.next_prev_pid;
tid = event->context_switch.next_prev_tid;
} else {
if (out)
return 0;
pid = sample->pid;
tid = sample->tid;
}
if (tid == -1)
intel_pt_log("context_switch event has no tid\n");
ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
if (ret <= 0)
return ret;
return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
static int intel_pt_process_itrace_start(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample)
{
if (!pt->per_cpu_mmaps)
return 0;
intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
sample->cpu, event->itrace_start.pid,
event->itrace_start.tid, sample->time,
perf_time_to_tsc(sample->time, &pt->tc));
return machine__set_current_tid(pt->machine, sample->cpu,
event->itrace_start.pid,
event->itrace_start.tid);
}
static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample)
{
u64 hw_id = event->aux_output_hw_id.hw_id;
struct auxtrace_queue *queue;
struct intel_pt_queue *ptq;
struct evsel *evsel;
queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id);
	if (!queue || !queue->priv || !evsel || hw_id >= INTEL_PT_MAX_PEBS) {
pr_err("Bad AUX output hardware ID\n");
return -EINVAL;
}
ptq = queue->priv;
ptq->pebs[hw_id].evsel = evsel;
ptq->pebs[hw_id].id = sample->id;
return 0;
}
static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
if (!al->map || addr < map__start(al->map) || addr >= map__end(al->map)) {
if (!thread__find_map(thread, cpumode, addr, al))
return -1;
}
return 0;
}
/* Invalidate all instruction cache entries that overlap the text poke */
static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
{
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
/* Assume text poke begins in a basic block no more than 4096 bytes */
int cnt = 4096 + event->text_poke.new_len;
struct thread *thread = pt->unknown_thread;
struct addr_location al;
struct machine *machine = pt->machine;
struct intel_pt_cache_entry *e;
u64 offset;
int ret = 0;
addr_location__init(&al);
if (!event->text_poke.new_len)
goto out;
for (; cnt; cnt--, addr--) {
struct dso *dso;
if (intel_pt_find_map(thread, cpumode, addr, &al)) {
if (addr < event->text_poke.addr)
goto out;
continue;
}
dso = map__dso(al.map);
if (!dso || !dso->auxtrace_cache)
continue;
offset = map__map_ip(al.map, addr);
e = intel_pt_cache_lookup(dso, machine, offset);
if (!e)
continue;
if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
/*
* No overlap. Working backwards there cannot be another
* basic block that overlaps the text poke if there is a
* branch instruction before the text poke address.
*/
if (e->branch != INTEL_PT_BR_NO_BRANCH)
goto out;
} else {
intel_pt_cache_invalidate(dso, machine, offset);
intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
dso->long_name, addr);
}
}
out:
addr_location__exit(&al);
return ret;
}
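/*
 * Main event handler: update the queues, run timed or timeless decoding as
 * appropriate, and process sideband events such as context switches,
 * itrace start, AUX output hardware IDs and text pokes.
 */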
static int intel_pt_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
u64 timestamp;
int err = 0;
if (dump_trace)
return 0;
if (!tool->ordered_events) {
pr_err("Intel Processor Trace requires ordered events\n");
return -EINVAL;
}
if (sample->time && sample->time != (u64)-1)
timestamp = perf_time_to_tsc(sample->time, &pt->tc);
else
timestamp = 0;
if (timestamp || pt->timeless_decoding) {
err = intel_pt_update_queues(pt);
if (err)
return err;
}
if (pt->timeless_decoding) {
if (pt->sampling_mode) {
if (sample->aux_sample.size)
err = intel_pt_process_timeless_sample(pt,
sample);
} else if (event->header.type == PERF_RECORD_EXIT) {
err = intel_pt_process_timeless_queues(pt,
event->fork.tid,
sample->time);
}
} else if (timestamp) {
if (!pt->first_timestamp)
intel_pt_first_timestamp(pt, timestamp);
err = intel_pt_process_queues(pt, timestamp);
}
if (err)
return err;
if (event->header.type == PERF_RECORD_SAMPLE) {
if (pt->synth_opts.add_callchain && !sample->callchain)
intel_pt_add_callchain(pt, sample);
if (pt->synth_opts.add_last_branch && !sample->branch_stack)
intel_pt_add_br_stack(pt, sample);
}
if (event->header.type == PERF_RECORD_AUX &&
(event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
pt->synth_opts.errors) {
err = intel_pt_lost(pt, sample);
if (err)
return err;
}
#ifdef HAVE_LIBTRACEEVENT
if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
err = intel_pt_process_switch(pt, sample);
else
#endif
if (event->header.type == PERF_RECORD_ITRACE_START)
err = intel_pt_process_itrace_start(pt, event, sample);
else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID)
err = intel_pt_process_aux_output_hw_id(pt, event, sample);
else if (event->header.type == PERF_RECORD_SWITCH ||
event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
err = intel_pt_context_switch(pt, event, sample);
if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
err = intel_pt_text_poke(pt, event);
if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
event->header.type, sample->cpu, sample->time, timestamp);
intel_pt_log_event(event);
}
return err;
}
static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
int ret;
if (dump_trace)
return 0;
if (!tool->ordered_events)
return -EINVAL;
ret = intel_pt_update_queues(pt);
if (ret < 0)
return ret;
if (pt->timeless_decoding)
return intel_pt_process_timeless_queues(pt, -1,
MAX_TIMESTAMP - 1);
return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}
static void intel_pt_free_events(struct perf_session *session)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
struct auxtrace_queues *queues = &pt->queues;
unsigned int i;
for (i = 0; i < queues->nr_queues; i++) {
intel_pt_free_queue(queues->queue_array[i].priv);
queues->queue_array[i].priv = NULL;
}
intel_pt_log_disable();
auxtrace_queues__free(queues);
}
static void intel_pt_free(struct perf_session *session)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
auxtrace_heap__free(&pt->heap);
intel_pt_free_events(session);
session->auxtrace = NULL;
intel_pt_free_vmcs_info(pt);
thread__put(pt->unknown_thread);
addr_filters__exit(&pt->filts);
zfree(&pt->chain);
zfree(&pt->filter);
zfree(&pt->time_ranges);
zfree(&pt->br_stack);
free(pt);
}
static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
struct evsel *evsel)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
return evsel->core.attr.type == pt->pmu_type;
}
static int intel_pt_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool __maybe_unused)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
if (!pt->data_queued) {
struct auxtrace_buffer *buffer;
off_t data_offset;
int fd = perf_data__fd(session->data);
int err;
if (perf_data__is_pipe(session->data)) {
data_offset = 0;
} else {
data_offset = lseek(fd, 0, SEEK_CUR);
if (data_offset == -1)
return -errno;
}
err = auxtrace_queues__add_event(&pt->queues, session, event,
data_offset, &buffer);
if (err)
return err;
		/* Dump here now that we have copied a piped trace out of the pipe */
if (dump_trace) {
if (auxtrace_buffer__get_data(buffer, fd)) {
intel_pt_dump_event(pt, buffer->data,
buffer->size);
auxtrace_buffer__put_data(buffer);
}
}
}
return 0;
}
static int intel_pt_queue_data(struct perf_session *session,
struct perf_sample *sample,
union perf_event *event, u64 data_offset)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
u64 timestamp;
if (event) {
return auxtrace_queues__add_event(&pt->queues, session, event,
data_offset, NULL);
}
if (sample->time && sample->time != (u64)-1)
timestamp = perf_time_to_tsc(sample->time, &pt->tc);
else
timestamp = 0;
return auxtrace_queues__add_sample(&pt->queues, session, sample,
data_offset, timestamp);
}
struct intel_pt_synth {
struct perf_tool dummy_tool;
struct perf_session *session;
};
static int intel_pt_event_synth(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct intel_pt_synth *intel_pt_synth =
container_of(tool, struct intel_pt_synth, dummy_tool);
return perf_session__deliver_synth_event(intel_pt_synth->session, event,
NULL);
}
static int intel_pt_synth_event(struct perf_session *session, const char *name,
struct perf_event_attr *attr, u64 id)
{
struct intel_pt_synth intel_pt_synth;
int err;
pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
name, id, (u64)attr->sample_type);
memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
intel_pt_synth.session = session;
err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
&id, intel_pt_event_synth);
if (err)
pr_err("%s: failed to synthesize '%s' event type\n",
__func__, name);
return err;
}
static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
const char *name)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.id && evsel->core.id[0] == id) {
if (evsel->name)
zfree(&evsel->name);
evsel->name = strdup(name);
break;
}
}
}
static struct evsel *intel_pt_evsel(struct intel_pt *pt,
struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
return evsel;
}
return NULL;
}
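/*
 * Set up the attributes and IDs of the synthesized event types (branches,
 * instructions, cycles, transactions, ptwrite, power and interrupt events)
 * according to the selected itrace options.
 */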
static int intel_pt_synth_events(struct intel_pt *pt,
struct perf_session *session)
{
struct evlist *evlist = session->evlist;
struct evsel *evsel = intel_pt_evsel(pt, evlist);
struct perf_event_attr attr;
u64 id;
int err;
if (!evsel) {
pr_debug("There are no selected events with Intel Processor Trace data\n");
return 0;
}
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.size = sizeof(struct perf_event_attr);
attr.type = PERF_TYPE_HARDWARE;
attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
PERF_SAMPLE_PERIOD;
if (pt->timeless_decoding)
attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
else
attr.sample_type |= PERF_SAMPLE_TIME;
if (!pt->per_cpu_mmaps)
attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
attr.exclude_user = evsel->core.attr.exclude_user;
attr.exclude_kernel = evsel->core.attr.exclude_kernel;
attr.exclude_hv = evsel->core.attr.exclude_hv;
attr.exclude_host = evsel->core.attr.exclude_host;
attr.exclude_guest = evsel->core.attr.exclude_guest;
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
if (pt->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
attr.sample_period = 1;
attr.sample_type |= PERF_SAMPLE_ADDR;
err = intel_pt_synth_event(session, "branches", &attr, id);
if (err)
return err;
pt->sample_branches = true;
pt->branches_sample_type = attr.sample_type;
pt->branches_id = id;
id += 1;
attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
}
if (pt->synth_opts.callchain)
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
if (pt->synth_opts.last_branch) {
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
/*
* We don't use the hardware index, but the sample generation
* code uses the new format branch_stack with this field,
* so the event attributes must indicate that it's present.
*/
attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
}
if (pt->synth_opts.instructions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
attr.sample_period =
intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
else
attr.sample_period = pt->synth_opts.period;
err = intel_pt_synth_event(session, "instructions", &attr, id);
if (err)
return err;
pt->sample_instructions = true;
pt->instructions_sample_type = attr.sample_type;
pt->instructions_id = id;
id += 1;
}
if (pt->synth_opts.cycles) {
attr.config = PERF_COUNT_HW_CPU_CYCLES;
if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
attr.sample_period =
intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
else
attr.sample_period = pt->synth_opts.period;
err = intel_pt_synth_event(session, "cycles", &attr, id);
if (err)
return err;
pt->sample_cycles = true;
pt->cycles_sample_type = attr.sample_type;
pt->cycles_id = id;
id += 1;
}
attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
attr.sample_period = 1;
if (pt->synth_opts.transactions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
err = intel_pt_synth_event(session, "transactions", &attr, id);
if (err)
return err;
pt->sample_transactions = true;
pt->transactions_sample_type = attr.sample_type;
pt->transactions_id = id;
intel_pt_set_event_name(evlist, id, "transactions");
id += 1;
}
attr.type = PERF_TYPE_SYNTH;
attr.sample_type |= PERF_SAMPLE_RAW;
if (pt->synth_opts.ptwrites) {
attr.config = PERF_SYNTH_INTEL_PTWRITE;
err = intel_pt_synth_event(session, "ptwrite", &attr, id);
if (err)
return err;
pt->sample_ptwrites = true;
pt->ptwrites_sample_type = attr.sample_type;
pt->ptwrites_id = id;
intel_pt_set_event_name(evlist, id, "ptwrite");
id += 1;
}
if (pt->synth_opts.pwr_events) {
pt->sample_pwr_events = true;
pt->pwr_events_sample_type = attr.sample_type;
attr.config = PERF_SYNTH_INTEL_CBR;
err = intel_pt_synth_event(session, "cbr", &attr, id);
if (err)
return err;
pt->cbr_id = id;
intel_pt_set_event_name(evlist, id, "cbr");
id += 1;
attr.config = PERF_SYNTH_INTEL_PSB;
err = intel_pt_synth_event(session, "psb", &attr, id);
if (err)
return err;
pt->psb_id = id;
intel_pt_set_event_name(evlist, id, "psb");
id += 1;
}
if (pt->synth_opts.pwr_events && (evsel->core.attr.config & INTEL_PT_CFG_PWR_EVT_EN)) {
attr.config = PERF_SYNTH_INTEL_MWAIT;
err = intel_pt_synth_event(session, "mwait", &attr, id);
if (err)
return err;
pt->mwait_id = id;
intel_pt_set_event_name(evlist, id, "mwait");
id += 1;
attr.config = PERF_SYNTH_INTEL_PWRE;
err = intel_pt_synth_event(session, "pwre", &attr, id);
if (err)
return err;
pt->pwre_id = id;
intel_pt_set_event_name(evlist, id, "pwre");
id += 1;
attr.config = PERF_SYNTH_INTEL_EXSTOP;
err = intel_pt_synth_event(session, "exstop", &attr, id);
if (err)
return err;
pt->exstop_id = id;
intel_pt_set_event_name(evlist, id, "exstop");
id += 1;
attr.config = PERF_SYNTH_INTEL_PWRX;
err = intel_pt_synth_event(session, "pwrx", &attr, id);
if (err)
return err;
pt->pwrx_id = id;
intel_pt_set_event_name(evlist, id, "pwrx");
id += 1;
}
if (pt->synth_opts.intr_events && (evsel->core.attr.config & INTEL_PT_CFG_EVT_EN)) {
attr.config = PERF_SYNTH_INTEL_EVT;
err = intel_pt_synth_event(session, "evt", &attr, id);
if (err)
return err;
pt->evt_sample_type = attr.sample_type;
pt->evt_id = id;
intel_pt_set_event_name(evlist, id, "evt");
id += 1;
}
if (pt->synth_opts.intr_events && pt->cap_event_trace) {
attr.config = PERF_SYNTH_INTEL_IFLAG_CHG;
err = intel_pt_synth_event(session, "iflag", &attr, id);
if (err)
return err;
pt->iflag_chg_sample_type = attr.sample_type;
pt->iflag_chg_id = id;
intel_pt_set_event_name(evlist, id, "iflag");
id += 1;
}
return 0;
}
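/*
 * Record the PEBS-via-PT (aux_output) event. If there is more than one,
 * clear single_pebs so that samples are matched by hardware ID instead.
 */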
static void intel_pt_setup_pebs_events(struct intel_pt *pt)
{
struct evsel *evsel;
if (!pt->synth_opts.other_events)
return;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (evsel->core.attr.aux_output && evsel->core.id) {
if (pt->single_pebs) {
pt->single_pebs = false;
return;
}
pt->single_pebs = true;
pt->sample_pebs = true;
pt->pebs_evsel = evsel;
}
}
}
static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry_reverse(evlist, evsel) {
const char *name = evsel__name(evsel);
if (!strcmp(name, "sched:sched_switch"))
return evsel;
}
return NULL;
}
static bool intel_pt_find_switch(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.context_switch)
return true;
}
return false;
}
static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
struct intel_pt *pt = data;
if (!strcmp(var, "intel-pt.mispred-all"))
pt->mispred_all = perf_config_bool(var, value);
if (!strcmp(var, "intel-pt.max-loops"))
perf_config_int(&pt->max_loops, var, value);
return 0;
}
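/*
 * Illustrative note (not part of the original file): the two keys handled
 * above come from perfconfig, e.g.
 *
 *	[intel-pt]
 *		mispred-all = true
 *		max-loops = 1000
 */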
/* Find least TSC which converts to ns or later */
static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
{
u64 tsc, tm;
tsc = perf_time_to_tsc(ns, &pt->tc);
while (1) {
tm = tsc_to_perf_time(tsc, &pt->tc);
if (tm < ns)
break;
tsc -= 1;
}
while (tm < ns)
tm = tsc_to_perf_time(++tsc, &pt->tc);
return tsc;
}
/* Find greatest TSC which converts to ns or earlier */
static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
{
u64 tsc, tm;
tsc = perf_time_to_tsc(ns, &pt->tc);
while (1) {
tm = tsc_to_perf_time(tsc, &pt->tc);
if (tm > ns)
break;
tsc += 1;
}
while (tm > ns)
tm = tsc_to_perf_time(--tsc, &pt->tc);
return tsc;
}
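/*
 * Illustrative note (not part of the original file): perf_time_to_tsc() is
 * only an estimate because the conversion rounds, so the loops in the two
 * helpers above nudge the TSC value until converting it back lands exactly
 * on the wanted side of 'ns'.
 */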
static int intel_pt_setup_time_ranges(struct intel_pt *pt,
struct itrace_synth_opts *opts)
{
struct perf_time_interval *p = opts->ptime_range;
int n = opts->range_num;
int i;
if (!n || !p || pt->timeless_decoding)
return 0;
pt->time_ranges = calloc(n, sizeof(struct range));
if (!pt->time_ranges)
return -ENOMEM;
pt->range_cnt = n;
intel_pt_log("%s: %u range(s)\n", __func__, n);
for (i = 0; i < n; i++) {
struct range *r = &pt->time_ranges[i];
u64 ts = p[i].start;
u64 te = p[i].end;
/*
* Take care to ensure the TSC range matches the perf-time range
* when converted back to perf-time.
*/
r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
r->end = te ? intel_pt_tsc_end(te, pt) : 0;
intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
i, ts, te);
intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
i, r->start, r->end);
}
return 0;
}
static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
{
struct intel_pt_vmcs_info *vmcs_info;
u64 tsc_offset, vmcs;
char *p = *args;
errno = 0;
p = skip_spaces(p);
if (!*p)
return 1;
tsc_offset = strtoull(p, &p, 0);
if (errno)
return -errno;
p = skip_spaces(p);
if (*p != ':') {
pt->dflt_tsc_offset = tsc_offset;
*args = p;
return 0;
}
p += 1;
while (1) {
vmcs = strtoull(p, &p, 0);
if (errno)
return -errno;
if (!vmcs)
return -EINVAL;
vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
if (!vmcs_info)
return -ENOMEM;
p = skip_spaces(p);
if (*p != ',')
break;
p += 1;
}
*args = p;
return 0;
}
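/*
 * Illustrative note (not part of the original file): the argument parsed
 * above has the form "<tsc offset>[:<vmcs>[,<vmcs>]...]". A bare offset such
 * as "0x123456789" becomes the default TSC offset, while
 * "0x123456789:0xabc,0xdef" applies it only to the listed VMCS values.
 */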
static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
{
char *args = pt->synth_opts.vm_tm_corr_args;
int ret;
if (!args)
return 0;
do {
ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
} while (!ret);
if (ret < 0) {
pr_err("Failed to parse VM Time Correlation options\n");
return ret;
}
return 0;
}
static const char * const intel_pt_info_fmts[] = {
[INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
[INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
	[INTEL_PT_TIME_MULT] = " Time Multiplier %"PRIu64"\n",
[INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
[INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
[INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
[INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
[INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
[INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
[INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
[INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
[INTEL_PT_MTC_FREQ_BITS] = " MTC freq bits %#"PRIx64"\n",
[INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
[INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
[INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
[INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
[INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
};
static void intel_pt_print_info(__u64 *arr, int start, int finish)
{
int i;
if (!dump_trace)
return;
for (i = start; i <= finish; i++) {
const char *fmt = intel_pt_info_fmts[i];
if (fmt)
fprintf(stdout, fmt, arr[i]);
}
}
static void intel_pt_print_info_str(const char *name, const char *str)
{
if (!dump_trace)
return;
fprintf(stdout, " %-20s%s\n", name, str ? str : "");
}
static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
{
return auxtrace_info->header.size >=
sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
}
int intel_pt_process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
struct intel_pt *pt;
void *info_end;
__u64 *info;
int err;
if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
min_sz)
return -EINVAL;
pt = zalloc(sizeof(struct intel_pt));
if (!pt)
return -ENOMEM;
pt->vmcs_info = RB_ROOT;
addr_filters__init(&pt->filts);
err = perf_config(intel_pt_perf_config, pt);
if (err)
goto err_free;
err = auxtrace_queues__init(&pt->queues);
if (err)
goto err_free;
if (session->itrace_synth_opts->set) {
pt->synth_opts = *session->itrace_synth_opts;
} else {
struct itrace_synth_opts *opts = session->itrace_synth_opts;
itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
if (!opts->default_no_sample && !opts->inject) {
pt->synth_opts.branches = false;
pt->synth_opts.callchain = true;
pt->synth_opts.add_callchain = true;
}
pt->synth_opts.thread_stack = opts->thread_stack;
}
if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT))
intel_pt_log_set_name(INTEL_PT_PMU_NAME);
pt->session = session;
pt->machine = &session->machines.host; /* No kvm support */
pt->auxtrace_type = auxtrace_info->type;
pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
INTEL_PT_PER_CPU_MMAPS);
if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
INTEL_PT_CYC_BIT);
}
if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
pt->max_non_turbo_ratio =
auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
intel_pt_print_info(&auxtrace_info->priv[0],
INTEL_PT_MAX_NONTURBO_RATIO,
INTEL_PT_MAX_NONTURBO_RATIO);
}
info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
info_end = (void *)auxtrace_info + auxtrace_info->header.size;
if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
size_t len;
len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
intel_pt_print_info(&auxtrace_info->priv[0],
INTEL_PT_FILTER_STR_LEN,
INTEL_PT_FILTER_STR_LEN);
if (len) {
const char *filter = (const char *)info;
len = roundup(len + 1, 8);
info += len >> 3;
if ((void *)info > info_end) {
pr_err("%s: bad filter string length\n", __func__);
err = -EINVAL;
goto err_free_queues;
}
pt->filter = memdup(filter, len);
if (!pt->filter) {
err = -ENOMEM;
goto err_free_queues;
}
if (session->header.needs_swap)
mem_bswap_64(pt->filter, len);
if (pt->filter[len - 1]) {
pr_err("%s: filter string not null terminated\n", __func__);
err = -EINVAL;
goto err_free_queues;
}
err = addr_filters__parse_bare_filter(&pt->filts,
filter);
if (err)
goto err_free_queues;
}
intel_pt_print_info_str("Filter string", pt->filter);
}
if ((void *)info < info_end) {
pt->cap_event_trace = *info++;
if (dump_trace)
fprintf(stdout, " Cap Event Trace %d\n",
pt->cap_event_trace);
}
pt->timeless_decoding = intel_pt_timeless_decoding(pt);
if (pt->timeless_decoding && !pt->tc.time_mult)
pt->tc.time_mult = 1;
pt->have_tsc = intel_pt_have_tsc(pt);
pt->sampling_mode = intel_pt_sampling_mode(pt);
pt->est_tsc = !pt->timeless_decoding;
if (pt->synth_opts.vm_time_correlation) {
if (pt->timeless_decoding) {
pr_err("Intel PT has no time information for VM Time Correlation\n");
err = -EINVAL;
goto err_free_queues;
}
if (session->itrace_synth_opts->ptime_range) {
pr_err("Time ranges cannot be specified with VM Time Correlation\n");
err = -EINVAL;
goto err_free_queues;
}
/* Currently TSC Offset is calculated using MTC packets */
if (!intel_pt_have_mtc(pt)) {
pr_err("MTC packets must have been enabled for VM Time Correlation\n");
err = -EINVAL;
goto err_free_queues;
}
err = intel_pt_parse_vm_tm_corr_args(pt);
if (err)
goto err_free_queues;
}
pt->unknown_thread = thread__new(999999999, 999999999);
if (!pt->unknown_thread) {
err = -ENOMEM;
goto err_free_queues;
}
err = thread__set_comm(pt->unknown_thread, "unknown", 0);
if (err)
goto err_delete_thread;
if (thread__init_maps(pt->unknown_thread, pt->machine)) {
err = -ENOMEM;
goto err_delete_thread;
}
pt->auxtrace.process_event = intel_pt_process_event;
pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
pt->auxtrace.queue_data = intel_pt_queue_data;
pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
pt->auxtrace.flush_events = intel_pt_flush;
pt->auxtrace.free_events = intel_pt_free_events;
pt->auxtrace.free = intel_pt_free;
pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
session->auxtrace = &pt->auxtrace;
if (dump_trace)
return 0;
if (pt->have_sched_switch == 1) {
pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
if (!pt->switch_evsel) {
pr_err("%s: missing sched_switch event\n", __func__);
err = -EINVAL;
goto err_delete_thread;
}
} else if (pt->have_sched_switch == 2 &&
!intel_pt_find_switch(session->evlist)) {
pr_err("%s: missing context_switch attribute flag\n", __func__);
err = -EINVAL;
goto err_delete_thread;
}
if (pt->synth_opts.log) {
bool log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
unsigned int log_on_error_size = pt->synth_opts.log_on_error_size;
intel_pt_log_enable(log_on_error, log_on_error_size);
}
/* Maximum non-turbo ratio is TSC freq / 100 MHz */
if (pt->tc.time_mult) {
u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
if (!pt->max_non_turbo_ratio)
pt->max_non_turbo_ratio =
(tsc_freq + 50000000) / 100000000;
intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
intel_pt_log("Maximum non-turbo ratio %u\n",
pt->max_non_turbo_ratio);
pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
}
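	/*
	 * Illustrative example (not part of the original file): with a
	 * 2.4 GHz TSC, (2400000000 + 50000000) / 100000000 gives a max
	 * non-turbo ratio of 24 and cbr2khz = 2400000000 / 24 / 1000 = 100000.
	 */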
err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
if (err)
goto err_delete_thread;
if (pt->synth_opts.calls)
pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_TRACE_END;
if (pt->synth_opts.returns)
pt->branches_filter |= PERF_IP_FLAG_RETURN |
PERF_IP_FLAG_TRACE_BEGIN;
if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
!symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
if (callchain_register_param(&callchain_param) < 0) {
symbol_conf.use_callchain = false;
pt->synth_opts.callchain = false;
pt->synth_opts.add_callchain = false;
}
}
if (pt->synth_opts.add_callchain) {
err = intel_pt_callchain_init(pt);
if (err)
goto err_delete_thread;
}
if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
pt->br_stack_sz = pt->synth_opts.last_branch_sz;
pt->br_stack_sz_plus = pt->br_stack_sz;
}
if (pt->synth_opts.add_last_branch) {
err = intel_pt_br_stack_init(pt);
if (err)
goto err_delete_thread;
/*
* Additional branch stack size to cater for tracing from the
* actual sample ip to where the sample time is recorded.
* Measured at about 200 branches, but generously set to 1024.
* If kernel space is not being traced, then add just 1 for the
* branch to kernel space.
*/
if (intel_pt_tracing_kernel(pt))
pt->br_stack_sz_plus += 1024;
else
pt->br_stack_sz_plus += 1;
}
pt->use_thread_stack = pt->synth_opts.callchain ||
pt->synth_opts.add_callchain ||
pt->synth_opts.thread_stack ||
pt->synth_opts.last_branch ||
pt->synth_opts.add_last_branch;
pt->callstack = pt->synth_opts.callchain ||
pt->synth_opts.add_callchain ||
pt->synth_opts.thread_stack;
err = intel_pt_synth_events(pt, session);
if (err)
goto err_delete_thread;
intel_pt_setup_pebs_events(pt);
if (perf_data__is_pipe(session->data)) {
pr_warning("WARNING: Intel PT with pipe mode is not recommended.\n"
" The output cannot relied upon. In particular,\n"
" timestamps and the order of events may be incorrect.\n");
}
if (pt->sampling_mode || list_empty(&session->auxtrace_index))
err = auxtrace_queue_data(session, true, true);
else
err = auxtrace_queues__process_index(&pt->queues, session);
if (err)
goto err_delete_thread;
if (pt->queues.populated)
pt->data_queued = true;
if (pt->timeless_decoding)
pr_debug2("Intel PT decoding without timestamps\n");
return 0;
err_delete_thread:
zfree(&pt->chain);
thread__zput(pt->unknown_thread);
err_free_queues:
intel_pt_log_disable();
auxtrace_queues__free(&pt->queues);
session->auxtrace = NULL;
err_free:
addr_filters__exit(&pt->filts);
zfree(&pt->filter);
zfree(&pt->time_ranges);
free(pt);
return err;
}
| linux-master | tools/perf/util/intel-pt.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include "debug.h"
#include "demangle-rust.h"
/*
* Mangled Rust symbols look like this:
*
* _$LT$std..sys..fd..FileDesc$u20$as$u20$core..ops..Drop$GT$::drop::hc68340e1baa4987a
*
* The original symbol is:
*
* <std::sys::fd::FileDesc as core::ops::Drop>::drop
*
* The last component of the path is a 64-bit hash in lowercase hex, prefixed
* with "h". Rust does not have a global namespace between crates, an illusion
* which Rust maintains by using the hash to distinguish things that would
* otherwise have the same symbol.
*
* Any path component not starting with a XID_Start character is prefixed with
* "_".
*
* The following escape sequences are used:
*
* "," => $C$
* "@" => $SP$
* "*" => $BP$
* "&" => $RF$
* "<" => $LT$
* ">" => $GT$
* "(" => $LP$
* ")" => $RP$
* " " => $u20$
* "'" => $u27$
* "[" => $u5b$
* "]" => $u5d$
* "~" => $u7e$
*
* A double ".." means "::" and a single "." means "-".
*
* The only characters allowed in the mangled symbol are a-zA-Z0-9 and _.:$
*/
static const char *hash_prefix = "::h";
static const size_t hash_prefix_len = 3;
static const size_t hash_len = 16;
static bool is_prefixed_hash(const char *start);
static bool looks_like_rust(const char *sym, size_t len);
static bool unescape(const char **in, char **out, const char *seq, char value);
/*
* INPUT:
* sym: symbol that has been through BFD-demangling
*
* This function looks for the following indicators:
*
* 1. The hash must consist of "h" followed by 16 lowercase hex digits.
*
* 2. As a sanity check, the hash must use between 5 and 15 of the 16 possible
* hex digits. This is true of 99.9998% of hashes so once in your life you
* may see a false negative. The point is to notice path components that
* could be Rust hashes but are probably not, like "haaaaaaaaaaaaaaaa". In
* this case a false positive (non-Rust symbol has an important path
* component removed because it looks like a Rust hash) is worse than a
* false negative (the rare Rust symbol is not demangled) so this sets the
* balance in favor of false negatives.
*
* 3. There must be no characters other than a-zA-Z0-9 and _.:$
*
* 4. There must be no unrecognized $-sign sequences.
*
* 5. There must be no sequence of three or more dots in a row ("...").
*/
bool
rust_is_mangled(const char *sym)
{
size_t len, len_without_hash;
if (!sym)
return false;
len = strlen(sym);
if (len <= hash_prefix_len + hash_len)
/* Not long enough to contain "::h" + hash + something else */
return false;
len_without_hash = len - (hash_prefix_len + hash_len);
if (!is_prefixed_hash(sym + len_without_hash))
return false;
return looks_like_rust(sym, len_without_hash);
}
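/*
 * Illustrative usage sketch (not part of the original file), using the
 * example symbol from the comment at the top of this file:
 */
#if 0
static void rust_demangle_example(void)
{
	char sym[] = "_$LT$std..sys..fd..FileDesc$u20$as$u20$core..ops..Drop$GT$"
		     "::drop::hc68340e1baa4987a";

	if (rust_is_mangled(sym))
		rust_demangle_sym(sym); /* demangles in place */
	/* sym now holds "<std::sys::fd::FileDesc as core::ops::Drop>::drop" */
}
#endif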
/*
* A hash is the prefix "::h" followed by 16 lowercase hex digits. The hex
* digits must comprise between 5 and 15 (inclusive) distinct digits.
*/
static bool is_prefixed_hash(const char *str)
{
const char *end;
bool seen[16];
size_t i;
int count;
if (strncmp(str, hash_prefix, hash_prefix_len))
return false;
str += hash_prefix_len;
memset(seen, false, sizeof(seen));
for (end = str + hash_len; str < end; str++)
if (*str >= '0' && *str <= '9')
seen[*str - '0'] = true;
else if (*str >= 'a' && *str <= 'f')
seen[*str - 'a' + 10] = true;
else
return false;
/* Count how many distinct digits seen */
count = 0;
for (i = 0; i < 16; i++)
if (seen[i])
count++;
return count >= 5 && count <= 15;
}
static bool looks_like_rust(const char *str, size_t len)
{
const char *end = str + len;
while (str < end)
switch (*str) {
case '$':
if (!strncmp(str, "$C$", 3))
str += 3;
else if (!strncmp(str, "$SP$", 4)
|| !strncmp(str, "$BP$", 4)
|| !strncmp(str, "$RF$", 4)
|| !strncmp(str, "$LT$", 4)
|| !strncmp(str, "$GT$", 4)
|| !strncmp(str, "$LP$", 4)
|| !strncmp(str, "$RP$", 4))
str += 4;
else if (!strncmp(str, "$u20$", 5)
|| !strncmp(str, "$u27$", 5)
|| !strncmp(str, "$u5b$", 5)
|| !strncmp(str, "$u5d$", 5)
|| !strncmp(str, "$u7e$", 5))
str += 5;
else
return false;
break;
case '.':
/* Do not allow three or more consecutive dots */
if (!strncmp(str, "...", 3))
return false;
/* Fall through */
case 'a' ... 'z':
case 'A' ... 'Z':
case '0' ... '9':
case '_':
case ':':
str++;
break;
default:
return false;
}
return true;
}
/*
* INPUT:
* sym: symbol for which rust_is_mangled(sym) returns true
*
* The input is demangled in-place because the mangled name is always longer
* than the demangled one.
*/
void
rust_demangle_sym(char *sym)
{
const char *in;
char *out;
const char *end;
if (!sym)
return;
in = sym;
out = sym;
end = sym + strlen(sym) - (hash_prefix_len + hash_len);
while (in < end)
switch (*in) {
case '$':
if (!(unescape(&in, &out, "$C$", ',')
|| unescape(&in, &out, "$SP$", '@')
|| unescape(&in, &out, "$BP$", '*')
|| unescape(&in, &out, "$RF$", '&')
|| unescape(&in, &out, "$LT$", '<')
|| unescape(&in, &out, "$GT$", '>')
|| unescape(&in, &out, "$LP$", '(')
|| unescape(&in, &out, "$RP$", ')')
|| unescape(&in, &out, "$u20$", ' ')
|| unescape(&in, &out, "$u27$", '\'')
|| unescape(&in, &out, "$u5b$", '[')
|| unescape(&in, &out, "$u5d$", ']')
|| unescape(&in, &out, "$u7e$", '~'))) {
pr_err("demangle-rust: unexpected escape sequence");
goto done;
}
break;
case '_':
/*
* If this is the start of a path component and the next
* character is an escape sequence, ignore the
* underscore. The mangler inserts an underscore to make
* sure the path component begins with a XID_Start
* character.
*/
if ((in == sym || in[-1] == ':') && in[1] == '$')
in++;
else
*out++ = *in++;
break;
case '.':
if (in[1] == '.') {
/* ".." becomes "::" */
*out++ = ':';
*out++ = ':';
in += 2;
} else {
/* "." becomes "-" */
*out++ = '-';
in++;
}
break;
case 'a' ... 'z':
case 'A' ... 'Z':
case '0' ... '9':
case ':':
*out++ = *in++;
break;
default:
pr_err("demangle-rust: unexpected character '%c' in symbol\n",
*in);
goto done;
}
done:
*out = '\0';
}
static bool unescape(const char **in, char **out, const char *seq, char value)
{
size_t len = strlen(seq);
if (strncmp(*in, seq, len))
return false;
**out = value;
*in += len;
*out += 1;
return true;
}
| linux-master | tools/perf/util/demangle-rust.c |
// SPDX-License-Identifier: GPL-2.0
#include "topdown.h"
#include <linux/kernel.h>
__weak bool arch_topdown_sample_read(struct evsel *leader __maybe_unused)
{
return false;
}
| linux-master | tools/perf/util/topdown.c |
// SPDX-License-Identifier: GPL-2.0
#include "tracepoint.h"
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/param.h>
#include <unistd.h>
#include <api/fs/tracing_path.h>
int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
{
char evt_path[MAXPATHLEN];
int fd;
snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, evt_dir->d_name);
fd = open(evt_path, O_RDONLY);
if (fd < 0)
return -EINVAL;
close(fd);
return 0;
}
/*
* Check whether event is in <debugfs_mount_point>/tracing/events
*/
int is_valid_tracepoint(const char *event_string)
{
DIR *sys_dir, *evt_dir;
struct dirent *sys_dirent, *evt_dirent;
char evt_path[MAXPATHLEN];
char *dir_path;
sys_dir = tracing_events__opendir();
if (!sys_dir)
return 0;
for_each_subsystem(sys_dir, sys_dirent) {
dir_path = get_events_file(sys_dirent->d_name);
if (!dir_path)
continue;
evt_dir = opendir(dir_path);
if (!evt_dir)
goto next;
for_each_event(dir_path, evt_dir, evt_dirent) {
snprintf(evt_path, MAXPATHLEN, "%s:%s",
sys_dirent->d_name, evt_dirent->d_name);
if (!strcmp(evt_path, event_string)) {
closedir(evt_dir);
put_events_file(dir_path);
closedir(sys_dir);
return 1;
}
}
closedir(evt_dir);
next:
put_events_file(dir_path);
}
closedir(sys_dir);
return 0;
}
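/*
 * Illustrative note (not part of the original file): the event string has the
 * form "<subsystem>:<name>", so is_valid_tracepoint("sched:sched_switch")
 * returns 1 when .../tracing/events/sched/sched_switch exists.
 */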
| linux-master | tools/perf/util/tracepoint.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2018
* Auxtrace support for s390 CPU-Measurement Sampling Facility
*
* Author(s): Thomas Richter <[email protected]>
*
* Auxiliary traces are collected during 'perf record' using rbd000 event.
* Several PERF_RECORD_XXX are generated during recording:
*
* PERF_RECORD_AUX:
* Records that new data landed in the AUX buffer part.
* PERF_RECORD_AUXTRACE:
* Defines auxtrace data. Followed by the actual data. The contents of
* the auxtrace data is dependent on the event and the CPU.
* This record is generated by perf record command. For details
* see Documentation/perf.data-file-format.txt.
* PERF_RECORD_AUXTRACE_INFO:
 * Defines a table of contents for PERF_RECORD_AUXTRACE records. This
* record is generated during 'perf record' command. Each record contains
* up to 256 entries describing offset and size of the AUXTRACE data in the
* perf.data file.
* PERF_RECORD_AUXTRACE_ERROR:
* Indicates an error during AUXTRACE collection such as buffer overflow.
* PERF_RECORD_FINISHED_ROUND:
* Perf events are not necessarily in time stamp order, as they can be
* collected in parallel on different CPUs. If the events should be
* processed in time order they need to be sorted first.
* Perf report guarantees that there is no reordering over a
* PERF_RECORD_FINISHED_ROUND boundary event. All perf records with a
* time stamp lower than this record are processed (and displayed) before
 * the succeeding perf records are processed.
*
* These records are evaluated during perf report command.
*
* 1. PERF_RECORD_AUXTRACE_INFO is used to set up the infrastructure for
* auxiliary trace data processing. See s390_cpumsf_process_auxtrace_info()
* below.
* Auxiliary trace data is collected per CPU. To merge the data into the report
* an auxtrace_queue is created for each CPU. It is assumed that the auxtrace
* data is in ascending order.
*
* Each queue has a double linked list of auxtrace_buffers. This list contains
* the offset and size of a CPU's auxtrace data. During auxtrace processing
* the data portion is mmap()'ed.
*
* To sort the queues in chronological order, all queue access is controlled
* by the auxtrace_heap. This is basically a stack, each stack element has two
* entries, the queue number and a time stamp. However the stack is sorted by
* the time stamps. The highest time stamp is at the bottom the lowest
* (nearest) time stamp is at the top. That sort order is maintained at all
* times!
*
* After the auxtrace infrastructure has been setup, the auxtrace queues are
* filled with data (offset/size pairs) and the auxtrace_heap is populated.
*
* 2. PERF_RECORD_XXX processing triggers access to the auxtrace_queues.
* Each record is handled by s390_cpumsf_process_event(). The time stamp of
* the perf record is compared with the time stamp located on the auxtrace_heap
* top element. If that time stamp is lower than the time stamp from the
* record sample, the auxtrace queues will be processed. As auxtrace queues
* control many auxtrace_buffers and each buffer can be quite large, the
* auxtrace buffer might be processed only partially. In this case the
* position in the auxtrace_buffer of that queue is remembered and the time
* stamp of the last processed entry of the auxtrace_buffer replaces the
* current auxtrace_heap top.
*
 * 3. Auxtrace_queues might run out of data and are fed by the
* PERF_RECORD_AUXTRACE handling, see s390_cpumsf_process_auxtrace_event().
*
* Event Generation
* Each sampling-data entry in the auxiliary trace data generates a perf sample.
* This sample is filled
* with data from the auxtrace such as PID/TID, instruction address, CPU state,
* etc. This sample is processed with perf_session__deliver_synth_event() to
* be included into the GUI.
*
* 4. PERF_RECORD_FINISHED_ROUND event is used to process all the remaining
 * auxiliary trace entries whose auxtrace_heap top time stamp is below the
 * time stamp of this record. This is triggered by ordered_event->deliver().
*
*
* Perf event processing.
* Event processing of PERF_RECORD_XXX entries relies on time stamp entries.
* This is the function call sequence:
*
* __cmd_report()
* |
* perf_session__process_events()
* |
* __perf_session__process_events()
* |
* perf_session__process_event()
* | This functions splits the PERF_RECORD_XXX records.
* | - Those generated by perf record command (type number equal or higher
* | than PERF_RECORD_USER_TYPE_START) are handled by
* | perf_session__process_user_event(see below)
* | - Those generated by the kernel are handled by
* | evlist__parse_sample_timestamp()
* |
* evlist__parse_sample_timestamp()
* | Extract time stamp from sample data.
* |
* perf_session__queue_event()
* | If timestamp is positive the sample is entered into an ordered_event
* | list, sort order is the timestamp. The event processing is deferred until
* | later (see perf_session__process_user_event()).
* | Other timestamps (0 or -1) are handled immediately by
* | perf_session__deliver_event(). These are events generated at start up
* | of command perf record. They create PERF_RECORD_COMM and PERF_RECORD_MMAP*
 * records. They are needed to create a list of running processes and their
* | memory mappings and layout. They are needed at the beginning to enable
* | command perf report to create process trees and memory mappings.
* |
* perf_session__deliver_event()
* | Delivers a PERF_RECORD_XXX entry for handling.
* |
* auxtrace__process_event()
* | The timestamp of the PERF_RECORD_XXX entry is taken to correlate with
* | time stamps from the auxiliary trace buffers. This enables
* | synchronization between auxiliary trace data and the events on the
* | perf.data file.
* |
* machine__deliver_event()
* | Handles the PERF_RECORD_XXX event. This depends on the record type.
* It might update the process tree, update a process memory map or enter
* a sample with IP and call back chain data into GUI data pool.
*
*
* Deferred processing determined by perf_session__process_user_event() is
* finally processed when a PERF_RECORD_FINISHED_ROUND is encountered. These
* are generated during command perf record.
* The timestamp of PERF_RECORD_FINISHED_ROUND event is taken to process all
* PERF_RECORD_XXX entries stored in the ordered_event list. This list was
* built up while reading the perf.data file.
* Each event is now processed by calling perf_session__deliver_event().
* This enables time synchronization between the data in the perf.data file and
* the data in the auxiliary trace buffers.
*/
#include <endian.h>
#include <errno.h>
#include <byteswap.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "color.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "session.h"
#include "tool.h"
#include "debug.h"
#include "auxtrace.h"
#include "s390-cpumsf.h"
#include "s390-cpumsf-kernel.h"
#include "s390-cpumcf-kernel.h"
#include "config.h"
#include "util/sample.h"
struct s390_cpumsf {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
struct auxtrace_heap heap;
struct perf_session *session;
struct machine *machine;
u32 auxtrace_type;
u32 pmu_type;
u16 machine_type;
bool data_queued;
bool use_logfile;
char *logdir;
};
struct s390_cpumsf_queue {
struct s390_cpumsf *sf;
unsigned int queue_nr;
struct auxtrace_buffer *buffer;
int cpu;
FILE *logfile;
FILE *logfile_ctr;
};
/* Check if the raw data should be dumped to file. If this is the case and
* the file to dump to has not been opened for writing, do so.
*
 * Return 0 on success and a value greater than zero on error so processing
 * continues.
*/
static int s390_cpumcf_dumpctr(struct s390_cpumsf *sf,
struct perf_sample *sample)
{
struct s390_cpumsf_queue *sfq;
struct auxtrace_queue *q;
int rc = 0;
if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)
return rc;
q = &sf->queues.queue_array[sample->cpu];
sfq = q->priv;
if (!sfq) /* Queue not yet allocated */
return rc;
if (!sfq->logfile_ctr) {
char *name;
rc = (sf->logdir)
? asprintf(&name, "%s/aux.ctr.%02x",
sf->logdir, sample->cpu)
: asprintf(&name, "aux.ctr.%02x", sample->cpu);
if (rc > 0)
sfq->logfile_ctr = fopen(name, "w");
if (sfq->logfile_ctr == NULL) {
pr_err("Failed to open counter set log file %s, "
"continue...\n", name);
rc = 1;
}
free(name);
}
if (sfq->logfile_ctr) {
/* See comment above for -4 */
size_t n = fwrite(sample->raw_data, sample->raw_size - 4, 1,
sfq->logfile_ctr);
if (n != 1) {
pr_err("Failed to write counter set data\n");
rc = 1;
}
}
return rc;
}
/* Display s390 CPU measurement facility basic-sampling data entry
* Data written on s390 in big endian byte order and contains bit
* fields across byte boundaries.
*/
static bool s390_cpumsf_basic_show(const char *color, size_t pos,
struct hws_basic_entry *basicp)
{
struct hws_basic_entry *basic = basicp;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
struct hws_basic_entry local;
unsigned long long word = be64toh(*(unsigned long long *)basicp);
memset(&local, 0, sizeof(local));
local.def = be16toh(basicp->def);
local.prim_asn = word & 0xffff;
local.CL = word >> 30 & 0x3;
local.I = word >> 32 & 0x1;
local.AS = word >> 33 & 0x3;
local.P = word >> 35 & 0x1;
local.W = word >> 36 & 0x1;
local.T = word >> 37 & 0x1;
local.U = word >> 40 & 0xf;
local.ia = be64toh(basicp->ia);
local.gpp = be64toh(basicp->gpp);
local.hpp = be64toh(basicp->hpp);
basic = &local;
#endif
if (basic->def != 1) {
pr_err("Invalid AUX trace basic entry [%#08zx]\n", pos);
return false;
}
color_fprintf(stdout, color, " [%#08zx] Basic Def:%04x Inst:%#04x"
" %c%c%c%c AS:%d ASN:%#04x IA:%#018llx\n"
"\t\tCL:%d HPP:%#018llx GPP:%#018llx\n",
pos, basic->def, basic->U,
basic->T ? 'T' : ' ',
basic->W ? 'W' : ' ',
basic->P ? 'P' : ' ',
basic->I ? 'I' : ' ',
basic->AS, basic->prim_asn, basic->ia, basic->CL,
basic->hpp, basic->gpp);
return true;
}
/* Display s390 CPU measurement facility diagnostic-sampling data entry.
* Data written on s390 in big endian byte order and contains bit
* fields across byte boundaries.
*/
static bool s390_cpumsf_diag_show(const char *color, size_t pos,
struct hws_diag_entry *diagp)
{
struct hws_diag_entry *diag = diagp;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
struct hws_diag_entry local;
unsigned long long word = be64toh(*(unsigned long long *)diagp);
local.def = be16toh(diagp->def);
local.I = word >> 32 & 0x1;
diag = &local;
#endif
if (diag->def < S390_CPUMSF_DIAG_DEF_FIRST) {
pr_err("Invalid AUX trace diagnostic entry [%#08zx]\n", pos);
return false;
}
color_fprintf(stdout, color, " [%#08zx] Diag Def:%04x %c\n",
pos, diag->def, diag->I ? 'I' : ' ');
return true;
}
/* Return TOD timestamp contained in a trailer entry */
static unsigned long long trailer_timestamp(struct hws_trailer_entry *te,
int idx)
{
/* te->t set: TOD in STCKE format, bytes 8-15
 * te->t not set: TOD in STCK format, bytes 0-7
*/
unsigned long long ts;
memcpy(&ts, &te->timestamp[idx], sizeof(ts));
return be64toh(ts);
}
/* Display s390 CPU measurement facility trailer entry */
static bool s390_cpumsf_trailer_show(const char *color, size_t pos,
struct hws_trailer_entry *te)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
struct hws_trailer_entry local;
const unsigned long long flags = be64toh(te->flags);
memset(&local, 0, sizeof(local));
local.f = flags >> 63 & 0x1;
local.a = flags >> 62 & 0x1;
local.t = flags >> 61 & 0x1;
local.bsdes = be16toh((flags >> 16 & 0xffff));
local.dsdes = be16toh((flags & 0xffff));
memcpy(&local.timestamp, te->timestamp, sizeof(te->timestamp));
local.overflow = be64toh(te->overflow);
local.clock_base = be64toh(te->progusage[0]) >> 63 & 1;
local.progusage2 = be64toh(te->progusage2);
te = &local;
#endif
if (te->bsdes != sizeof(struct hws_basic_entry)) {
pr_err("Invalid AUX trace trailer entry [%#08zx]\n", pos);
return false;
}
color_fprintf(stdout, color, " [%#08zx] Trailer %c%c%c bsdes:%d"
" dsdes:%d Overflow:%lld Time:%#llx\n"
"\t\tC:%d TOD:%#lx\n",
pos,
te->f ? 'F' : ' ',
te->a ? 'A' : ' ',
te->t ? 'T' : ' ',
te->bsdes, te->dsdes, te->overflow,
trailer_timestamp(te, te->clock_base),
te->clock_base, te->progusage2);
return true;
}
/* Test a sample data block. It must be 4KB or a multiple thereof in size and
* 4KB page aligned. Each sample data page has a trailer entry at the
* end which contains the sample entry data sizes.
*
* Return true if the sample data block passes the checks and set the
* basic set entry size and diagnostic set entry size.
*
* Return false on failure.
*
* Note: Old hardware does not set the basic or diagnostic entry sizes
* in the trailer entry. Use the type number instead.
*/
static bool s390_cpumsf_validate(int machine_type,
unsigned char *buf, size_t len,
unsigned short *bsdes,
unsigned short *dsdes)
{
struct hws_basic_entry *basic = (struct hws_basic_entry *)buf;
struct hws_trailer_entry *te;
*dsdes = *bsdes = 0;
if (len & (S390_CPUMSF_PAGESZ - 1)) /* Illegal size */
return false;
if (be16toh(basic->def) != 1) /* No basic set entry, must be first */
return false;
/* Check for trailer entry at end of SDB */
te = (struct hws_trailer_entry *)(buf + S390_CPUMSF_PAGESZ
- sizeof(*te));
*bsdes = be16toh(te->bsdes);
*dsdes = be16toh(te->dsdes);
if (!te->bsdes && !te->dsdes) {
/* Very old hardware, use CPUID */
switch (machine_type) {
case 2097:
case 2098:
*dsdes = 64;
*bsdes = 32;
break;
case 2817:
case 2818:
*dsdes = 74;
*bsdes = 32;
break;
case 2827:
case 2828:
*dsdes = 85;
*bsdes = 32;
break;
case 2964:
case 2965:
*dsdes = 112;
*bsdes = 32;
break;
default:
/* Illegal trailer entry */
return false;
}
}
return true;
}
/* Return true if there is room for another entry */
static bool s390_cpumsf_reached_trailer(size_t entry_sz, size_t pos)
{
size_t payload = S390_CPUMSF_PAGESZ - sizeof(struct hws_trailer_entry);
if (payload - (pos & (S390_CPUMSF_PAGESZ - 1)) < entry_sz)
return false;
return true;
}
/* Dump an auxiliary buffer. These buffers are multiples of
* 4KB SDB pages.
*/
static void s390_cpumsf_dump(struct s390_cpumsf *sf,
unsigned char *buf, size_t len)
{
const char *color = PERF_COLOR_BLUE;
struct hws_basic_entry *basic;
struct hws_diag_entry *diag;
unsigned short bsdes, dsdes;
size_t pos = 0;
color_fprintf(stdout, color,
". ... s390 AUX data: size %zu bytes\n",
len);
if (!s390_cpumsf_validate(sf->machine_type, buf, len, &bsdes,
&dsdes)) {
pr_err("Invalid AUX trace data block size:%zu"
" (type:%d bsdes:%hd dsdes:%hd)\n",
len, sf->machine_type, bsdes, dsdes);
return;
}
/* s390 kernel always returns 4KB blocks fully occupied,
* no partially filled SDBs.
*/
while (pos < len) {
/* Handle Basic entry */
basic = (struct hws_basic_entry *)(buf + pos);
if (s390_cpumsf_basic_show(color, pos, basic))
pos += bsdes;
else
return;
/* Handle Diagnostic entry */
diag = (struct hws_diag_entry *)(buf + pos);
if (s390_cpumsf_diag_show(color, pos, diag))
pos += dsdes;
else
return;
/* Check for trailer entry */
if (!s390_cpumsf_reached_trailer(bsdes + dsdes, pos)) {
/* Show trailer entry */
struct hws_trailer_entry te;
pos = (pos + S390_CPUMSF_PAGESZ)
& ~(S390_CPUMSF_PAGESZ - 1);
pos -= sizeof(te);
memcpy(&te, buf + pos, sizeof(te));
/* Set descriptor sizes in case of old hardware
* where these values are not set.
*/
te.bsdes = bsdes;
te.dsdes = dsdes;
if (s390_cpumsf_trailer_show(color, pos, &te))
pos += sizeof(te);
else
return;
}
}
}
static void s390_cpumsf_dump_event(struct s390_cpumsf *sf, unsigned char *buf,
size_t len)
{
printf(".\n");
s390_cpumsf_dump(sf, buf, len);
}
#define S390_LPP_PID_MASK 0xffffffff
static bool s390_cpumsf_make_event(size_t pos,
struct hws_basic_entry *basic,
struct s390_cpumsf_queue *sfq)
{
struct perf_sample sample = {
.ip = basic->ia,
.pid = basic->hpp & S390_LPP_PID_MASK,
.tid = basic->hpp & S390_LPP_PID_MASK,
.cpumode = PERF_RECORD_MISC_CPUMODE_UNKNOWN,
.cpu = sfq->cpu,
.period = 1
};
union perf_event event;
memset(&event, 0, sizeof(event));
if (basic->CL == 1) /* Native LPAR mode */
sample.cpumode = basic->P ? PERF_RECORD_MISC_USER
: PERF_RECORD_MISC_KERNEL;
else if (basic->CL == 2) /* Guest kernel/user space */
sample.cpumode = basic->P ? PERF_RECORD_MISC_GUEST_USER
: PERF_RECORD_MISC_GUEST_KERNEL;
else if (basic->gpp || basic->prim_asn != 0xffff)
/* Use heuristics on old hardware */
sample.cpumode = basic->P ? PERF_RECORD_MISC_GUEST_USER
: PERF_RECORD_MISC_GUEST_KERNEL;
else
sample.cpumode = basic->P ? PERF_RECORD_MISC_USER
: PERF_RECORD_MISC_KERNEL;
event.sample.header.type = PERF_RECORD_SAMPLE;
event.sample.header.misc = sample.cpumode;
event.sample.header.size = sizeof(struct perf_event_header);
pr_debug4("%s pos:%#zx ip:%#" PRIx64 " P:%d CL:%d pid:%d.%d cpumode:%d cpu:%d\n",
__func__, pos, sample.ip, basic->P, basic->CL, sample.pid,
sample.tid, sample.cpumode, sample.cpu);
if (perf_session__deliver_synth_event(sfq->sf->session, &event,
&sample)) {
pr_err("s390 Auxiliary Trace: failed to deliver event\n");
return false;
}
return true;
}
static unsigned long long get_trailer_time(const unsigned char *buf)
{
struct hws_trailer_entry *te;
unsigned long long aux_time, progusage2;
bool clock_base;
te = (struct hws_trailer_entry *)(buf + S390_CPUMSF_PAGESZ
- sizeof(*te));
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
clock_base = be64toh(te->progusage[0]) >> 63 & 0x1;
progusage2 = be64toh(te->progusage[1]);
#else
clock_base = te->clock_base;
progusage2 = te->progusage2;
#endif
if (!clock_base) /* TOD_CLOCK_BASE value missing */
return 0;
/* Correct calculation to convert time stamp in trailer entry to
* nano seconds (taken from arch/s390 function tod_to_ns()).
* TOD_CLOCK_BASE is stored in trailer entry member progusage2.
*/
aux_time = trailer_timestamp(te, clock_base) - progusage2;
aux_time = (aux_time >> 9) * 125 + (((aux_time & 0x1ff) * 125) >> 9);
return aux_time;
}
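/*
 * Illustrative note (not part of the original file): the expression above is
 * ns = tod * 125 / 512 split into two parts to avoid overflow; one TOD unit
 * is 1/4096 of a microsecond and 125/512 == 1000/4096. For example, a TOD
 * delta of 0x200 (512 units) converts to 125 ns.
 */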
/* Process the data samples of a single queue. The first parameter is a
* pointer to the queue, the second parameter is the time stamp. This
* is the time stamp:
* - of the event that triggered this processing.
* - or the time stamp when the last processing of this queue stopped.
 * In this case it stopped at a 4KB page boundary and recorded the
 * position where to continue processing on the next invocation
* (see buffer->use_data and buffer->use_size).
*
* When this function returns the second parameter is updated to
* reflect the time stamp of the last processed auxiliary data entry
* (taken from the trailer entry of that page). The caller uses this
* returned time stamp to record the last processed entry in this
* queue.
*
* The function returns:
* 0: Processing successful. The second parameter returns the
* time stamp from the trailer entry until which position
* processing took place. Subsequent calls resume from this
* position.
* <0: An error occurred during processing. The second parameter
* returns the maximum time stamp.
* >0: Done on this queue. The second parameter returns the
* maximum time stamp.
*/
static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts)
{
struct s390_cpumsf *sf = sfq->sf;
unsigned char *buf = sfq->buffer->use_data;
size_t len = sfq->buffer->use_size;
struct hws_basic_entry *basic;
unsigned short bsdes, dsdes;
size_t pos = 0;
int err = 1;
u64 aux_ts;
if (!s390_cpumsf_validate(sf->machine_type, buf, len, &bsdes,
&dsdes)) {
*ts = ~0ULL;
return -1;
}
/* Get trailer entry time stamp and check if entries in
* this auxiliary page are ready for processing. If the
 * time stamp of the first entry is too high, the whole buffer
 * can be skipped. In this case return the time stamp.
*/
aux_ts = get_trailer_time(buf);
if (!aux_ts) {
pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n",
(s64)sfq->buffer->data_offset);
aux_ts = ~0ULL;
goto out;
}
if (aux_ts > *ts) {
*ts = aux_ts;
return 0;
}
while (pos < len) {
/* Handle Basic entry */
basic = (struct hws_basic_entry *)(buf + pos);
if (s390_cpumsf_make_event(pos, basic, sfq))
pos += bsdes;
else {
err = -EBADF;
goto out;
}
pos += dsdes; /* Skip diagnostic entry */
/* Check for trailer entry */
if (!s390_cpumsf_reached_trailer(bsdes + dsdes, pos)) {
pos = (pos + S390_CPUMSF_PAGESZ)
& ~(S390_CPUMSF_PAGESZ - 1);
/* Check existence of next page */
if (pos >= len)
break;
aux_ts = get_trailer_time(buf + pos);
if (!aux_ts) {
aux_ts = ~0ULL;
goto out;
}
if (aux_ts > *ts) {
*ts = aux_ts;
sfq->buffer->use_data += pos;
sfq->buffer->use_size -= pos;
return 0;
}
}
}
out:
*ts = aux_ts;
sfq->buffer->use_size = 0;
sfq->buffer->use_data = NULL;
return err; /* Buffer completely scanned or error */
}
/* Run the s390 auxiliary trace decoder.
* Select the queue buffer to operate on, the caller already selected
* the proper queue, depending on second parameter 'ts'.
* This is the time stamp until which the auxiliary entries should
* be processed. This value is updated by called functions and
* returned to the caller.
*
* Resume processing in the current buffer. If there is no buffer
* get a new buffer from the queue and setup start position for
* processing.
* When a buffer is completely processed remove it from the queue
* before returning.
*
* This function returns
* 1: When the queue is empty. Second parameter will be set to
* maximum time stamp.
* 0: Normal processing done.
* <0: Error during queue buffer setup. This causes the caller
* to stop processing completely.
*/
static int s390_cpumsf_run_decoder(struct s390_cpumsf_queue *sfq,
u64 *ts)
{
struct auxtrace_buffer *buffer;
struct auxtrace_queue *queue;
int err;
queue = &sfq->sf->queues.queue_array[sfq->queue_nr];
/* Get buffer and last position in buffer to resume
* decoding the auxiliary entries. One buffer might be large
* and decoding might stop in between. This depends on the time
* stamp of the trailer entry in each page of the auxiliary
* data and the time stamp of the event triggering the decoding.
*/
if (sfq->buffer == NULL) {
sfq->buffer = buffer = auxtrace_buffer__next(queue,
sfq->buffer);
if (!buffer) {
*ts = ~0ULL;
return 1; /* Processing done on this queue */
}
/* Start with a new buffer on this queue */
if (buffer->data) {
buffer->use_size = buffer->size;
buffer->use_data = buffer->data;
}
if (sfq->logfile) { /* Write into log file */
size_t rc = fwrite(buffer->data, buffer->size, 1,
sfq->logfile);
if (rc != 1)
pr_err("Failed to write auxiliary data\n");
}
} else
buffer = sfq->buffer;
if (!buffer->data) {
int fd = perf_data__fd(sfq->sf->session->data);
buffer->data = auxtrace_buffer__get_data(buffer, fd);
if (!buffer->data)
return -ENOMEM;
buffer->use_size = buffer->size;
buffer->use_data = buffer->data;
if (sfq->logfile) { /* Write into log file */
size_t rc = fwrite(buffer->data, buffer->size, 1,
sfq->logfile);
if (rc != 1)
pr_err("Failed to write auxiliary data\n");
}
}
pr_debug4("%s queue_nr:%d buffer:%" PRId64 " offset:%#" PRIx64 " size:%#zx rest:%#zx\n",
__func__, sfq->queue_nr, buffer->buffer_nr, buffer->offset,
buffer->size, buffer->use_size);
err = s390_cpumsf_samples(sfq, ts);
/* If non-zero, there is either an error (err < 0) or the buffer is
* completely done (err > 0). The error is unrecoverable, usually
* some descriptors could not be read successfully, so continue with
* the next buffer.
* In both cases the parameter 'ts' has been updated.
*/
if (err) {
sfq->buffer = NULL;
list_del_init(&buffer->list);
auxtrace_buffer__free(buffer);
if (err > 0) /* Buffer done, no error */
err = 0;
}
return err;
}
static struct s390_cpumsf_queue *
s390_cpumsf_alloc_queue(struct s390_cpumsf *sf, unsigned int queue_nr)
{
struct s390_cpumsf_queue *sfq;
sfq = zalloc(sizeof(struct s390_cpumsf_queue));
if (sfq == NULL)
return NULL;
sfq->sf = sf;
sfq->queue_nr = queue_nr;
sfq->cpu = -1;
if (sf->use_logfile) {
char *name;
int rc;
rc = (sf->logdir)
? asprintf(&name, "%s/aux.smp.%02x",
sf->logdir, queue_nr)
: asprintf(&name, "aux.smp.%02x", queue_nr);
if (rc > 0)
sfq->logfile = fopen(name, "w");
if (sfq->logfile == NULL) {
pr_err("Failed to open auxiliary log file %s,"
"continue...\n", name);
sf->use_logfile = false;
}
free(name);
}
return sfq;
}
static int s390_cpumsf_setup_queue(struct s390_cpumsf *sf,
struct auxtrace_queue *queue,
unsigned int queue_nr, u64 ts)
{
struct s390_cpumsf_queue *sfq = queue->priv;
if (list_empty(&queue->head))
return 0;
if (sfq == NULL) {
sfq = s390_cpumsf_alloc_queue(sf, queue_nr);
if (!sfq)
return -ENOMEM;
queue->priv = sfq;
if (queue->cpu != -1)
sfq->cpu = queue->cpu;
}
return auxtrace_heap__add(&sf->heap, queue_nr, ts);
}
static int s390_cpumsf_setup_queues(struct s390_cpumsf *sf, u64 ts)
{
unsigned int i;
int ret = 0;
for (i = 0; i < sf->queues.nr_queues; i++) {
ret = s390_cpumsf_setup_queue(sf, &sf->queues.queue_array[i],
i, ts);
if (ret)
break;
}
return ret;
}
static int s390_cpumsf_update_queues(struct s390_cpumsf *sf, u64 ts)
{
if (!sf->queues.new_data)
return 0;
sf->queues.new_data = false;
return s390_cpumsf_setup_queues(sf, ts);
}
static int s390_cpumsf_process_queues(struct s390_cpumsf *sf, u64 timestamp)
{
unsigned int queue_nr;
u64 ts;
int ret;
while (1) {
struct auxtrace_queue *queue;
struct s390_cpumsf_queue *sfq;
if (!sf->heap.heap_cnt)
return 0;
if (sf->heap.heap_array[0].ordinal >= timestamp)
return 0;
queue_nr = sf->heap.heap_array[0].queue_nr;
queue = &sf->queues.queue_array[queue_nr];
sfq = queue->priv;
auxtrace_heap__pop(&sf->heap);
if (sf->heap.heap_cnt) {
ts = sf->heap.heap_array[0].ordinal + 1;
if (ts > timestamp)
ts = timestamp;
} else {
ts = timestamp;
}
ret = s390_cpumsf_run_decoder(sfq, &ts);
if (ret < 0) {
auxtrace_heap__add(&sf->heap, queue_nr, ts);
return ret;
}
if (!ret) {
ret = auxtrace_heap__add(&sf->heap, queue_nr, ts);
if (ret < 0)
return ret;
}
}
return 0;
}
static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
char msg[MAX_AUXTRACE_ERROR_MSG];
union perf_event event;
int err;
strncpy(msg, "Lost Auxiliary Trace Buffer", sizeof(msg) - 1);
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
code, cpu, pid, tid, ip, msg, timestamp);
err = perf_session__deliver_synth_event(sf->session, &event, NULL);
if (err)
pr_err("s390 Auxiliary Trace: failed to deliver error event,"
"error %d\n", err);
return err;
}
static int s390_cpumsf_lost(struct s390_cpumsf *sf, struct perf_sample *sample)
{
return s390_cpumsf_synth_error(sf, 1, sample->cpu,
sample->pid, sample->tid, 0,
sample->time);
}
static int
s390_cpumsf_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
{
struct s390_cpumsf *sf = container_of(session->auxtrace,
struct s390_cpumsf,
auxtrace);
u64 timestamp = sample->time;
struct evsel *ev_bc000;
int err = 0;
if (dump_trace)
return 0;
if (!tool->ordered_events) {
pr_err("s390 Auxiliary Trace requires ordered events\n");
return -EINVAL;
}
if (event->header.type == PERF_RECORD_SAMPLE &&
sample->raw_size) {
/* Handle event with raw data */
ev_bc000 = evlist__event2evsel(session->evlist, event);
if (ev_bc000 &&
ev_bc000->core.attr.config == PERF_EVENT_CPUM_CF_DIAG)
err = s390_cpumcf_dumpctr(sf, sample);
return err;
}
if (event->header.type == PERF_RECORD_AUX &&
event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
return s390_cpumsf_lost(sf, sample);
if (timestamp) {
err = s390_cpumsf_update_queues(sf, timestamp);
if (!err)
err = s390_cpumsf_process_queues(sf, timestamp);
}
return err;
}
struct s390_cpumsf_synth {
struct perf_tool cpumsf_tool;
struct perf_session *session;
};
static int
s390_cpumsf_process_auxtrace_event(struct perf_session *session,
union perf_event *event __maybe_unused,
struct perf_tool *tool __maybe_unused)
{
struct s390_cpumsf *sf = container_of(session->auxtrace,
struct s390_cpumsf,
auxtrace);
int fd = perf_data__fd(session->data);
struct auxtrace_buffer *buffer;
off_t data_offset;
int err;
if (sf->data_queued)
return 0;
if (perf_data__is_pipe(session->data)) {
data_offset = 0;
} else {
data_offset = lseek(fd, 0, SEEK_CUR);
if (data_offset == -1)
return -errno;
}
err = auxtrace_queues__add_event(&sf->queues, session, event,
data_offset, &buffer);
if (err)
return err;
/* Dump here after copying piped trace out of the pipe */
if (dump_trace) {
if (auxtrace_buffer__get_data(buffer, fd)) {
s390_cpumsf_dump_event(sf, buffer->data,
buffer->size);
auxtrace_buffer__put_data(buffer);
}
}
return 0;
}
static void s390_cpumsf_free_events(struct perf_session *session __maybe_unused)
{
}
static int s390_cpumsf_flush(struct perf_session *session __maybe_unused,
struct perf_tool *tool __maybe_unused)
{
return 0;
}
static void s390_cpumsf_free_queues(struct perf_session *session)
{
struct s390_cpumsf *sf = container_of(session->auxtrace,
struct s390_cpumsf,
auxtrace);
struct auxtrace_queues *queues = &sf->queues;
unsigned int i;
for (i = 0; i < queues->nr_queues; i++) {
struct s390_cpumsf_queue *sfq = (struct s390_cpumsf_queue *)
queues->queue_array[i].priv;
if (sfq != NULL) {
if (sfq->logfile) {
fclose(sfq->logfile);
sfq->logfile = NULL;
}
if (sfq->logfile_ctr) {
fclose(sfq->logfile_ctr);
sfq->logfile_ctr = NULL;
}
}
zfree(&queues->queue_array[i].priv);
}
auxtrace_queues__free(queues);
}
static void s390_cpumsf_free(struct perf_session *session)
{
struct s390_cpumsf *sf = container_of(session->auxtrace,
struct s390_cpumsf,
auxtrace);
auxtrace_heap__free(&sf->heap);
s390_cpumsf_free_queues(session);
session->auxtrace = NULL;
zfree(&sf->logdir);
free(sf);
}
static bool
s390_cpumsf_evsel_is_auxtrace(struct perf_session *session __maybe_unused,
struct evsel *evsel)
{
return evsel->core.attr.type == PERF_TYPE_RAW &&
evsel->core.attr.config == PERF_EVENT_CPUM_SF_DIAG;
}
static int s390_cpumsf_get_type(const char *cpuid)
{
int ret, family = 0;
ret = sscanf(cpuid, "%*[^,],%u", &family);
return (ret == 1) ? family : 0;
}
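/*
 * Illustrative note (not part of the original file): the s390 cpuid string
 * starts with the manufacturer followed by the machine type, e.g.
 * "IBM,2964,...", so the sscanf() above skips everything up to the first
 * comma and returns 2964 for that example.
 */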
/* Check itrace options set on perf report command.
 * Return true if none are set or all options specified can be
 * handled on s390 (currently only option 'd' for logging).
* Return false otherwise.
*/
static bool check_auxtrace_itrace(struct itrace_synth_opts *itops)
{
bool ison = false;
if (!itops || !itops->set)
return true;
ison = itops->inject || itops->instructions || itops->branches ||
itops->transactions || itops->ptwrites ||
itops->pwr_events || itops->errors ||
itops->dont_decode || itops->calls || itops->returns ||
itops->callchain || itops->thread_stack ||
itops->last_branch || itops->add_callchain ||
itops->add_last_branch;
if (!ison)
return true;
pr_err("Unsupported --itrace options specified\n");
return false;
}
/* Check for AUXTRACE dump directory if it is needed.
* On failure print an error message but continue.
* Return 0 on wrong keyword in config file and 1 otherwise.
*/
static int s390_cpumsf__config(const char *var, const char *value, void *cb)
{
struct s390_cpumsf *sf = cb;
struct stat stbuf;
int rc;
if (strcmp(var, "auxtrace.dumpdir"))
return 0;
sf->logdir = strdup(value);
if (sf->logdir == NULL) {
pr_err("Failed to find auxtrace log directory %s,"
" continue with current directory...\n", value);
return 1;
}
rc = stat(sf->logdir, &stbuf);
if (rc == -1 || !S_ISDIR(stbuf.st_mode)) {
pr_err("Missing auxtrace log directory %s,"
" continue with current directory...\n", value);
zfree(&sf->logdir);
}
return 1;
}
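/*
 * Illustrative note (not part of the original file): the dump directory is
 * taken from perfconfig, e.g.
 *
 *	[auxtrace]
 *		dumpdir = /tmp
 *
 * in which case the per-queue dump files created above are written as
 * /tmp/aux.smp.NN and /tmp/aux.ctr.NN instead of into the current directory.
 */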
int s390_cpumsf_process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
struct s390_cpumsf *sf;
int err;
if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info))
return -EINVAL;
sf = zalloc(sizeof(struct s390_cpumsf));
if (sf == NULL)
return -ENOMEM;
if (!check_auxtrace_itrace(session->itrace_synth_opts)) {
err = -EINVAL;
goto err_free;
}
sf->use_logfile = session->itrace_synth_opts->log;
if (sf->use_logfile)
perf_config(s390_cpumsf__config, sf);
err = auxtrace_queues__init(&sf->queues);
if (err)
goto err_free;
sf->session = session;
sf->machine = &session->machines.host; /* No kvm support */
sf->auxtrace_type = auxtrace_info->type;
sf->pmu_type = PERF_TYPE_RAW;
sf->machine_type = s390_cpumsf_get_type(session->evlist->env->cpuid);
sf->auxtrace.process_event = s390_cpumsf_process_event;
sf->auxtrace.process_auxtrace_event = s390_cpumsf_process_auxtrace_event;
sf->auxtrace.flush_events = s390_cpumsf_flush;
sf->auxtrace.free_events = s390_cpumsf_free_events;
sf->auxtrace.free = s390_cpumsf_free;
sf->auxtrace.evsel_is_auxtrace = s390_cpumsf_evsel_is_auxtrace;
session->auxtrace = &sf->auxtrace;
if (dump_trace)
return 0;
err = auxtrace_queues__process_index(&sf->queues, session);
if (err)
goto err_free_queues;
if (sf->queues.populated)
sf->data_queued = true;
return 0;
err_free_queues:
auxtrace_queues__free(&sf->queues);
session->auxtrace = NULL;
err_free:
zfree(&sf->logdir);
free(sf);
return err;
}
| linux-master | tools/perf/util/s390-cpumsf.c |
// SPDX-License-Identifier: GPL-2.0
#include "units.h"
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/time64.h>
unsigned long parse_tag_value(const char *str, struct parse_tag *tags)
{
struct parse_tag *i = tags;
while (i->tag) {
char *s = strchr(str, i->tag);
if (s) {
unsigned long int value;
char *endptr;
value = strtoul(str, &endptr, 10);
if (s != endptr)
break;
if (value > ULONG_MAX / i->mult)
break;
value *= i->mult;
return value;
}
i++;
}
return (unsigned long) -1;
}
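/*
 * Illustrative usage sketch (not part of the original file), with a
 * caller-defined tag table: "16M" parses to 16 << 20, and a string whose
 * suffix is not in the table yields (unsigned long) -1.
 */
#if 0
static unsigned long parse_size_example(const char *str)
{
	struct parse_tag tags_size[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0, .mult = 0 },
	};

	return parse_tag_value(str, tags_size); /* e.g. "16M" -> 16777216 */
}
#endif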
double convert_unit_double(double value, char *unit)
{
*unit = ' ';
if (value > 1000.0) {
value /= 1000.0;
*unit = 'K';
}
if (value > 1000.0) {
value /= 1000.0;
*unit = 'M';
}
if (value > 1000.0) {
value /= 1000.0;
*unit = 'G';
}
return value;
}
unsigned long convert_unit(unsigned long value, char *unit)
{
double v = convert_unit_double((double)value, unit);
return (unsigned long)v;
}
int unit_number__scnprintf(char *buf, size_t size, u64 n)
{
char unit[4] = "BKMG";
int i = 0;
while (((n / 1024) > 1) && (i < 3)) {
n /= 1024;
i++;
}
return scnprintf(buf, size, "%" PRIu64 "%c", n, unit[i]);
}
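/*
 * Illustrative examples (not part of the original file): the helper above
 * scales by 1024, so n = 2048 prints as "2K", n = 3145728 prints as "3M",
 * and n = 1500 stays "1500B" because 1500 / 1024 is not greater than 1.
 */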
| linux-master | tools/perf/util/units.c |
// SPDX-License-Identifier: GPL-2.0
/*
* usage.c
*
* Various reporting routines.
* Originally copied from GIT source.
*
* Copyright (C) Linus Torvalds, 2005
*/
#include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
const char perf_usage_string[] =
"perf [--version] [--help] [OPTIONS] COMMAND [ARGS]";
const char perf_more_info_string[] =
"See 'perf help COMMAND' for more information on a specific command.";
static __noreturn void usage_builtin(const char *err)
{
fprintf(stderr, "\n Usage: %s\n", err);
exit(129);
}
/* If we are in a dlopen()ed .so, writing to a global variable would segfault
 * (ugh), so keep things static. */
static void (*usage_routine)(const char *err) __noreturn = usage_builtin;
void usage(const char *err)
{
usage_routine(err);
}
| linux-master | tools/perf/util/usage.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for libpfm4 event encoding.
*
* Copyright 2020 Google LLC.
*/
#include "util/cpumap.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/parse-events.h"
#include "util/pmus.h"
#include "util/pfm.h"
#include "util/strbuf.h"
#include "util/thread_map.h"
#include <string.h>
#include <linux/kernel.h>
#include <perfmon/pfmlib_perf_event.h>
static void libpfm_initialize(void)
{
int ret;
ret = pfm_initialize();
if (ret != PFM_SUCCESS) {
ui__warning("libpfm failed to initialize: %s\n",
pfm_strerror(ret));
}
}
int parse_libpfm_events_option(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct evlist *evlist = *(struct evlist **)opt->value;
struct perf_event_attr attr;
struct perf_pmu *pmu;
struct evsel *evsel, *grp_leader = NULL;
char *p, *q, *p_orig;
const char *sep;
int grp_evt = -1;
int ret;
libpfm_initialize();
p_orig = p = strdup(str);
if (!p)
return -1;
/*
* force loading of the PMU list
*/
perf_pmus__scan(NULL);
for (q = p; strsep(&p, ",{}"); q = p) {
sep = p ? str + (p - p_orig - 1) : "";
if (*sep == '{') {
if (grp_evt > -1) {
ui__error(
"nested event groups not supported\n");
goto error;
}
grp_evt++;
}
/* no event */
if (*q == '\0') {
if (*sep == '}') {
if (grp_evt < 0) {
ui__error("cannot close a non-existing event group\n");
goto error;
}
grp_evt--;
}
continue;
}
memset(&attr, 0, sizeof(attr));
event_attr_init(&attr);
ret = pfm_get_perf_event_encoding(q, PFM_PLM0|PFM_PLM3,
&attr, NULL, NULL);
if (ret != PFM_SUCCESS) {
ui__error("failed to parse event %s : %s\n", str,
pfm_strerror(ret));
goto error;
}
pmu = perf_pmus__find_by_type((unsigned int)attr.type);
evsel = parse_events__add_event(evlist->core.nr_entries,
&attr, q, /*metric_id=*/NULL,
pmu);
if (evsel == NULL)
goto error;
evsel->is_libpfm_event = true;
evlist__add(evlist, evsel);
if (grp_evt == 0)
grp_leader = evsel;
if (grp_evt > -1) {
evsel__set_leader(evsel, grp_leader);
grp_leader->core.nr_members++;
grp_evt++;
}
if (*sep == '}') {
if (grp_evt < 0) {
ui__error(
"cannot close a non-existing event group\n");
goto error;
}
grp_leader = NULL;
grp_evt = -1;
}
}
free(p_orig);
return 0;
error:
free(p_orig);
return -1;
}
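/*
 * Check whether a libpfm event is usable on this machine by encoding it
 * into a perf_event_attr and trying to open it with a throw-away evsel.
 */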
static bool is_libpfm_event_supported(const char *name, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
struct perf_pmu *pmu;
struct evsel *evsel;
struct perf_event_attr attr = {};
bool result = true;
int ret;
ret = pfm_get_perf_event_encoding(name, PFM_PLM0|PFM_PLM3,
&attr, NULL, NULL);
if (ret != PFM_SUCCESS)
return false;
pmu = perf_pmus__find_by_type((unsigned int)attr.type);
evsel = parse_events__add_event(0, &attr, name, /*metric_id=*/NULL, pmu);
if (evsel == NULL)
return false;
evsel->is_libpfm_event = true;
if (evsel__open(evsel, cpus, threads) < 0)
result = false;
evsel__close(evsel);
evsel__delete(evsel);
return result;
}
static const char *srcs[PFM_ATTR_CTRL_MAX] = {
[PFM_ATTR_CTRL_UNKNOWN] = "???",
[PFM_ATTR_CTRL_PMU] = "PMU",
[PFM_ATTR_CTRL_PERF_EVENT] = "perf_event",
};
static void
print_attr_flags(struct strbuf *buf, const pfm_event_attr_info_t *info)
{
if (info->is_dfl)
strbuf_addf(buf, "[default] ");
if (info->is_precise)
strbuf_addf(buf, "[precise] ");
}
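/*
 * Print one libpfm event, and each of its umasks as a separate entry,
 * through the generic print_cb callbacks, skipping anything that cannot
 * actually be opened on the running host.
 */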
static void
print_libpfm_event(const struct print_callbacks *print_cb, void *print_state,
const pfm_pmu_info_t *pinfo, const pfm_event_info_t *info,
struct strbuf *buf)
{
int j, ret;
char topic[80], name[80];
struct perf_cpu_map *cpus = perf_cpu_map__empty_new(1);
struct perf_thread_map *threads = thread_map__new_by_tid(0);
strbuf_setlen(buf, 0);
snprintf(topic, sizeof(topic), "pfm %s", pinfo->name);
snprintf(name, sizeof(name), "%s::%s", pinfo->name, info->name);
strbuf_addf(buf, "Code: 0x%"PRIx64"\n", info->code);
pfm_for_each_event_attr(j, info) {
pfm_event_attr_info_t ainfo;
const char *src;
ainfo.size = sizeof(ainfo);
ret = pfm_get_event_attr_info(info->idx, j, PFM_OS_PERF_EVENT_EXT, &ainfo);
if (ret != PFM_SUCCESS)
continue;
if (ainfo.ctrl >= PFM_ATTR_CTRL_MAX)
ainfo.ctrl = PFM_ATTR_CTRL_UNKNOWN;
src = srcs[ainfo.ctrl];
switch (ainfo.type) {
case PFM_ATTR_UMASK: /* Ignore for now */
break;
case PFM_ATTR_MOD_BOOL:
strbuf_addf(buf, " Modif: %s: [%s] : %s (boolean)\n", src,
ainfo.name, ainfo.desc);
break;
case PFM_ATTR_MOD_INTEGER:
strbuf_addf(buf, " Modif: %s: [%s] : %s (integer)\n", src,
ainfo.name, ainfo.desc);
break;
case PFM_ATTR_NONE:
case PFM_ATTR_RAW_UMASK:
case PFM_ATTR_MAX:
default:
strbuf_addf(buf, " Attr: %s: [%s] : %s\n", src,
ainfo.name, ainfo.desc);
}
}
if (is_libpfm_event_supported(name, cpus, threads)) {
print_cb->print_event(print_state, pinfo->name, topic,
name, info->equiv,
/*scale_unit=*/NULL,
/*deprecated=*/NULL, "PFM event",
info->desc, /*long_desc=*/NULL,
/*encoding_desc=*/buf->buf);
}
pfm_for_each_event_attr(j, info) {
pfm_event_attr_info_t ainfo;
const char *src;
strbuf_setlen(buf, 0);
ainfo.size = sizeof(ainfo);
ret = pfm_get_event_attr_info(info->idx, j, PFM_OS_PERF_EVENT_EXT, &ainfo);
if (ret != PFM_SUCCESS)
continue;
if (ainfo.ctrl >= PFM_ATTR_CTRL_MAX)
ainfo.ctrl = PFM_ATTR_CTRL_UNKNOWN;
src = srcs[ainfo.ctrl];
if (ainfo.type == PFM_ATTR_UMASK) {
strbuf_addf(buf, "Umask: 0x%02"PRIx64" : %s: ",
ainfo.code, src);
print_attr_flags(buf, &ainfo);
snprintf(name, sizeof(name), "%s::%s:%s",
pinfo->name, info->name, ainfo.name);
if (!is_libpfm_event_supported(name, cpus, threads))
continue;
print_cb->print_event(print_state,
pinfo->name,
topic,
name, /*alias=*/NULL,
/*scale_unit=*/NULL,
/*deprecated=*/NULL, "PFM event",
ainfo.desc, /*long_desc=*/NULL,
/*encoding_desc=*/buf->buf);
}
}
perf_cpu_map__put(cpus);
perf_thread_map__put(threads);
}
void print_libpfm_events(const struct print_callbacks *print_cb, void *print_state)
{
pfm_event_info_t info;
pfm_pmu_info_t pinfo;
int p, ret;
struct strbuf storage;
libpfm_initialize();
	/* the size field tells libpfm which ABI version of these structs we use */
info.size = sizeof(info);
pinfo.size = sizeof(pinfo);
strbuf_init(&storage, 2048);
pfm_for_all_pmus(p) {
ret = pfm_get_pmu_info(p, &pinfo);
if (ret != PFM_SUCCESS)
continue;
/* only print events that are supported by host HW */
if (!pinfo.is_present)
continue;
/* handled by perf directly */
if (pinfo.pmu == PFM_PMU_PERF_EVENT)
continue;
for (int i = pinfo.first_event; i != -1; i = pfm_get_event_next(i)) {
ret = pfm_get_event_info(i, PFM_OS_PERF_EVENT_EXT,
&info);
if (ret != PFM_SUCCESS)
continue;
print_libpfm_event(print_cb, print_state, &pinfo, &info, &storage);
}
}
strbuf_release(&storage);
}
| linux-master | tools/perf/util/pfm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* JSON export.
*
* Copyright (C) 2021, CodeWeavers Inc. <[email protected]>
*/
#include "data-convert.h"
#include <fcntl.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <unistd.h>
#include "linux/compiler.h"
#include "linux/err.h"
#include "util/auxtrace.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/header.h"
#include "util/map.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/tool.h"
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
struct convert_json {
struct perf_tool tool;
FILE *out;
bool first;
u64 events_count;
};
// Outputs a JSON-encoded string surrounded by quotes with characters escaped.
static void output_json_string(FILE *out, const char *s)
{
fputc('"', out);
while (*s) {
switch (*s) {
// required escapes with special forms as per RFC 8259
case '"': fputs("\\\"", out); break;
case '\\': fputs("\\\\", out); break;
case '\b': fputs("\\b", out); break;
case '\f': fputs("\\f", out); break;
case '\n': fputs("\\n", out); break;
case '\r': fputs("\\r", out); break;
case '\t': fputs("\\t", out); break;
default:
			// all other control characters must be escaped by hex code;
			// compare as unsigned char so UTF-8 bytes >= 0x80 (negative when
			// plain char is signed) pass through instead of being mis-escaped
			if ((unsigned char)*s <= 0x1f)
				fprintf(out, "\\u%04x", *s);
else
fputc(*s, out);
break;
}
++s;
}
fputc('"', out);
}
// Outputs an optional comma, newline and indentation to delimit a new value
// from the previous one in a JSON object or array.
static void output_json_delimiters(FILE *out, bool comma, int depth)
{
int i;
if (comma)
fputc(',', out);
fputc('\n', out);
for (i = 0; i < depth; ++i)
fputc('\t', out);
}
// Outputs a printf format string (with delimiter) as a JSON value.
__printf(4, 5)
static void output_json_format(FILE *out, bool comma, int depth, const char *format, ...)
{
va_list args;
output_json_delimiters(out, comma, depth);
va_start(args, format);
vfprintf(out, format, args);
va_end(args);
}
// Outputs a JSON key-value pair where the value is a string.
static void output_json_key_string(FILE *out, bool comma, int depth,
const char *key, const char *value)
{
output_json_delimiters(out, comma, depth);
output_json_string(out, key);
fputs(": ", out);
output_json_string(out, value);
}
// Outputs a JSON key-value pair where the value is a printf format string.
__printf(5, 6)
static void output_json_key_format(FILE *out, bool comma, int depth,
const char *key, const char *format, ...)
{
va_list args;
output_json_delimiters(out, comma, depth);
output_json_string(out, key);
fputs(": ", out);
va_start(args, format);
vfprintf(out, format, args);
va_end(args);
}
static void output_sample_callchain_entry(struct perf_tool *tool,
u64 ip, struct addr_location *al)
{
struct convert_json *c = container_of(tool, struct convert_json, tool);
FILE *out = c->out;
output_json_format(out, false, 4, "{");
output_json_key_format(out, false, 5, "ip", "\"0x%" PRIx64 "\"", ip);
if (al && al->sym && al->sym->namelen) {
struct dso *dso = al->map ? map__dso(al->map) : NULL;
fputc(',', out);
output_json_key_string(out, false, 5, "symbol", al->sym->name);
if (dso) {
const char *dso_name = dso->short_name;
if (dso_name && strlen(dso_name) > 0) {
fputc(',', out);
output_json_key_string(out, false, 5, "dso", dso_name);
}
}
}
output_json_format(out, false, 4, "}");
}
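// Emits one JSON object per sample: timestamp, pid/tid, cpu (when known),
// comm, the resolved callchain and, when built with libtraceevent, the raw
// tracepoint fields.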
static int process_sample_event(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel __maybe_unused,
struct machine *machine)
{
struct convert_json *c = container_of(tool, struct convert_json, tool);
FILE *out = c->out;
struct addr_location al;
u64 sample_type = __evlist__combined_sample_type(evsel->evlist);
u8 cpumode = PERF_RECORD_MISC_USER;
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0) {
pr_err("Sample resolution failed!\n");
addr_location__exit(&al);
return -1;
}
++c->events_count;
if (c->first)
c->first = false;
else
fputc(',', out);
output_json_format(out, false, 2, "{");
output_json_key_format(out, false, 3, "timestamp", "%" PRIi64, sample->time);
output_json_key_format(out, true, 3, "pid", "%i", thread__pid(al.thread));
output_json_key_format(out, true, 3, "tid", "%i", thread__tid(al.thread));
if ((sample_type & PERF_SAMPLE_CPU))
output_json_key_format(out, true, 3, "cpu", "%i", sample->cpu);
else if (thread__cpu(al.thread) >= 0)
output_json_key_format(out, true, 3, "cpu", "%i", thread__cpu(al.thread));
output_json_key_string(out, true, 3, "comm", thread__comm_str(al.thread));
output_json_key_format(out, true, 3, "callchain", "[");
if (sample->callchain) {
unsigned int i;
bool ok;
bool first_callchain = true;
for (i = 0; i < sample->callchain->nr; ++i) {
u64 ip = sample->callchain->ips[i];
struct addr_location tal;
if (ip >= PERF_CONTEXT_MAX) {
switch (ip) {
case PERF_CONTEXT_HV:
cpumode = PERF_RECORD_MISC_HYPERVISOR;
break;
case PERF_CONTEXT_KERNEL:
cpumode = PERF_RECORD_MISC_KERNEL;
break;
case PERF_CONTEXT_USER:
cpumode = PERF_RECORD_MISC_USER;
break;
default:
pr_debug("invalid callchain context: %"
PRId64 "\n", (s64) ip);
break;
}
continue;
}
if (first_callchain)
first_callchain = false;
else
fputc(',', out);
addr_location__init(&tal);
ok = thread__find_symbol(al.thread, cpumode, ip, &tal);
output_sample_callchain_entry(tool, ip, ok ? &tal : NULL);
addr_location__exit(&tal);
}
} else {
output_sample_callchain_entry(tool, sample->ip, &al);
}
output_json_format(out, false, 3, "]");
#ifdef HAVE_LIBTRACEEVENT
if (sample->raw_data) {
int i;
struct tep_format_field **fields;
fields = tep_event_fields(evsel->tp_format);
if (fields) {
i = 0;
while (fields[i]) {
struct trace_seq s;
trace_seq_init(&s);
tep_print_field(&s, sample->raw_data, fields[i]);
output_json_key_string(out, true, 3, fields[i]->name, s.buffer);
i++;
}
free(fields);
}
}
#endif
output_json_format(out, false, 2, "}");
addr_location__exit(&al);
return 0;
}
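// Emits the "headers" object: perf.data header fields plus selected perf_env
// information (hostname, arch, cpu description, clock data, cmdline, ...).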
static void output_headers(struct perf_session *session, struct convert_json *c)
{
struct stat st;
struct perf_header *header = &session->header;
int ret;
int fd = perf_data__fd(session->data);
int i;
FILE *out = c->out;
output_json_key_format(out, false, 2, "header-version", "%u", header->version);
ret = fstat(fd, &st);
if (ret >= 0) {
time_t stctime = st.st_mtime;
char buf[256];
strftime(buf, sizeof(buf), "%FT%TZ", gmtime(&stctime));
output_json_key_string(out, true, 2, "captured-on", buf);
} else {
pr_debug("Failed to get mtime of source file, not writing captured-on");
}
output_json_key_format(out, true, 2, "data-offset", "%" PRIu64, header->data_offset);
output_json_key_format(out, true, 2, "data-size", "%" PRIu64, header->data_size);
output_json_key_format(out, true, 2, "feat-offset", "%" PRIu64, header->feat_offset);
output_json_key_string(out, true, 2, "hostname", header->env.hostname);
output_json_key_string(out, true, 2, "os-release", header->env.os_release);
output_json_key_string(out, true, 2, "arch", header->env.arch);
output_json_key_string(out, true, 2, "cpu-desc", header->env.cpu_desc);
output_json_key_string(out, true, 2, "cpuid", header->env.cpuid);
output_json_key_format(out, true, 2, "nrcpus-online", "%u", header->env.nr_cpus_online);
output_json_key_format(out, true, 2, "nrcpus-avail", "%u", header->env.nr_cpus_avail);
if (header->env.clock.enabled) {
output_json_key_format(out, true, 2, "clockid",
"%u", header->env.clock.clockid);
output_json_key_format(out, true, 2, "clock-time",
"%" PRIu64, header->env.clock.clockid_ns);
output_json_key_format(out, true, 2, "real-time",
"%" PRIu64, header->env.clock.tod_ns);
}
output_json_key_string(out, true, 2, "perf-version", header->env.version);
output_json_key_format(out, true, 2, "cmdline", "[");
for (i = 0; i < header->env.nr_cmdline; i++) {
output_json_delimiters(out, i != 0, 3);
output_json_string(c->out, header->env.cmdline_argv[i]);
}
output_json_format(out, false, 2, "]");
}
int bt_convert__perf2json(const char *input_name, const char *output_name,
struct perf_data_convert_opts *opts __maybe_unused)
{
struct perf_session *session;
int fd;
int ret = -1;
struct convert_json c = {
.tool = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.comm = perf_event__process_comm,
.namespaces = perf_event__process_namespaces,
.cgroup = perf_event__process_cgroup,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
#ifdef HAVE_LIBTRACEEVENT
.tracing_data = perf_event__process_tracing_data,
#endif
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.event_update = perf_event__process_event_update,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
.first = true,
.events_count = 0,
};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
.path = input_name,
.force = opts->force,
};
if (opts->all) {
pr_err("--all is currently unsupported for JSON output.\n");
goto err;
}
if (opts->tod) {
pr_err("--tod is currently unsupported for JSON output.\n");
goto err;
}
fd = open(output_name, O_CREAT | O_WRONLY | (opts->force ? O_TRUNC : O_EXCL), 0666);
if (fd == -1) {
if (errno == EEXIST)
pr_err("Output file exists. Use --force to overwrite it.\n");
else
pr_err("Error opening output file!\n");
goto err;
}
c.out = fdopen(fd, "w");
if (!c.out) {
fprintf(stderr, "Error opening output file!\n");
close(fd);
goto err;
}
session = perf_session__new(&data, &c.tool);
if (IS_ERR(session)) {
fprintf(stderr, "Error creating perf session!\n");
goto err_fclose;
}
if (symbol__init(&session->header.env) < 0) {
fprintf(stderr, "Symbol init error!\n");
goto err_session_delete;
}
// The opening brace is printed manually because it isn't delimited from a
// previous value (i.e. we don't want a leading newline)
fputc('{', c.out);
// Version number for future-proofing. Most additions should be able to be
// done in a backwards-compatible way so this should only need to be bumped
// if some major breaking change must be made.
output_json_format(c.out, false, 1, "\"linux-perf-json-version\": 1");
// Output headers
output_json_format(c.out, true, 1, "\"headers\": {");
output_headers(session, &c);
output_json_format(c.out, false, 1, "}");
// Output samples
output_json_format(c.out, true, 1, "\"samples\": [");
perf_session__process_events(session);
output_json_format(c.out, false, 1, "]");
output_json_format(c.out, false, 0, "}");
fputc('\n', c.out);
fprintf(stderr,
"[ perf data convert: Converted '%s' into JSON data '%s' ]\n",
data.path, output_name);
fprintf(stderr,
"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
(ftell(c.out)) / 1024.0 / 1024.0, c.events_count);
ret = 0;
err_session_delete:
perf_session__delete(session);
err_fclose:
fclose(c.out);
err:
return ret;
}
| linux-master | tools/perf/util/data-convert-json.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"
#define pr_N(n, fmt, ...) \
eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)
#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
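/*
 * Insert 'new' into the time-ordered 'events' list. The search starts from
 * the previously queued event and walks forward or backward from there,
 * since consecutive events are usually close in time.
 */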
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
struct ordered_event *last = oe->last;
u64 timestamp = new->timestamp;
struct list_head *p;
++oe->nr_events;
oe->last = new;
pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);
if (!last) {
list_add(&new->list, &oe->events);
oe->max_timestamp = timestamp;
return;
}
	/*
	 * 'last' is the most recently queued event and may sit anywhere in the
	 * time-ordered list. New events are usually close to it in time, so
	 * start the search there rather than from either end of the list.
	 */
if (last->timestamp <= timestamp) {
while (last->timestamp <= timestamp) {
p = last->list.next;
if (p == &oe->events) {
list_add_tail(&new->list, &oe->events);
oe->max_timestamp = timestamp;
return;
}
last = list_entry(p, struct ordered_event, list);
}
list_add_tail(&new->list, &last->list);
} else {
while (last->timestamp > timestamp) {
p = last->list.prev;
if (p == &oe->events) {
list_add(&new->list, &oe->events);
return;
}
last = list_entry(p, struct ordered_event, list);
}
list_add(&new->list, &last->list);
}
}
static union perf_event *__dup_event(struct ordered_events *oe,
union perf_event *event)
{
union perf_event *new_event = NULL;
if (oe->cur_alloc_size < oe->max_alloc_size) {
new_event = memdup(event, event->header.size);
if (new_event)
oe->cur_alloc_size += event->header.size;
}
return new_event;
}
static union perf_event *dup_event(struct ordered_events *oe,
union perf_event *event)
{
return oe->copy_on_queue ? __dup_event(oe, event) : event;
}
static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
if (event) {
oe->cur_alloc_size -= event->header.size;
free(event);
}
}
static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
if (oe->copy_on_queue)
__free_dup_event(oe, event);
}
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
union perf_event *event)
{
struct list_head *cache = &oe->cache;
struct ordered_event *new = NULL;
union perf_event *new_event;
size_t size;
new_event = dup_event(oe, event);
if (!new_event)
return NULL;
/*
* We maintain the following scheme of buffers for ordered
* event allocation:
*
* to_free list -> buffer1 (64K)
* buffer2 (64K)
* ...
*
* Each buffer keeps an array of ordered events objects:
* buffer -> event[0]
* event[1]
* ...
*
* Each allocated ordered event is linked to one of
* following lists:
* - time ordered list 'events'
* - list of currently removed events 'cache'
*
* Allocation of the ordered event uses the following order
* to get the memory:
* - use recently removed object from 'cache' list
* - use available object in current allocation buffer
* - allocate new buffer if the current buffer is full
*
* Removal of ordered event object moves it from events to
* the cache list.
*/
size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);
if (!list_empty(cache)) {
new = list_entry(cache->next, struct ordered_event, list);
list_del_init(&new->list);
} else if (oe->buffer) {
new = &oe->buffer->event[oe->buffer_idx];
if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
oe->buffer = NULL;
} else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
oe->buffer = malloc(size);
if (!oe->buffer) {
free_dup_event(oe, new_event);
return NULL;
}
pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
oe->cur_alloc_size, size, oe->max_alloc_size);
oe->cur_alloc_size += size;
list_add(&oe->buffer->list, &oe->to_free);
oe->buffer_idx = 1;
new = &oe->buffer->event[0];
} else {
pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
return NULL;
}
new->event = new_event;
return new;
}
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
union perf_event *event)
{
struct ordered_event *new;
new = alloc_event(oe, event);
if (new) {
new->timestamp = timestamp;
queue_event(oe, new);
}
return new;
}
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
list_move(&event->list, &oe->cache);
oe->nr_events--;
free_dup_event(oe, event->event);
event->event = NULL;
}
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
u64 timestamp, u64 file_offset, const char *file_path)
{
struct ordered_event *oevent;
if (!timestamp || timestamp == ~0ULL)
return -ETIME;
if (timestamp < oe->last_flush) {
pr_oe_time(timestamp, "out of order event\n");
pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
oe->last_flush_type);
oe->nr_unordered_events++;
}
oevent = ordered_events__new_event(oe, timestamp, event);
if (!oevent) {
ordered_events__flush(oe, OE_FLUSH__HALF);
oevent = ordered_events__new_event(oe, timestamp, event);
}
if (!oevent)
return -ENOMEM;
oevent->file_offset = file_offset;
oevent->file_path = file_path;
return 0;
}
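/*
 * Deliver, in timestamp order, every queued event not newer than
 * oe->next_flush, recycling delivered entries onto the cache list.
 */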
static int do_flush(struct ordered_events *oe, bool show_progress)
{
struct list_head *head = &oe->events;
struct ordered_event *tmp, *iter;
u64 limit = oe->next_flush;
u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
struct ui_progress prog;
int ret;
if (!limit)
return 0;
if (show_progress)
ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");
list_for_each_entry_safe(iter, tmp, head, list) {
if (session_done())
return 0;
if (iter->timestamp > limit)
break;
ret = oe->deliver(oe, iter);
if (ret)
return ret;
ordered_events__delete(oe, iter);
oe->last_flush = iter->timestamp;
if (show_progress)
ui_progress__update(&prog, 1);
}
if (list_empty(head))
oe->last = NULL;
else if (last_ts <= limit)
oe->last = list_entry(head->prev, struct ordered_event, list);
if (show_progress)
ui_progress__finish();
return 0;
}
static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
u64 timestamp)
{
static const char * const str[] = {
"NONE",
"FINAL",
"ROUND",
"HALF ",
"TOP ",
"TIME ",
};
int err;
bool show_progress = false;
if (oe->nr_events == 0)
return 0;
switch (how) {
case OE_FLUSH__FINAL:
show_progress = true;
fallthrough;
case OE_FLUSH__TOP:
oe->next_flush = ULLONG_MAX;
break;
case OE_FLUSH__HALF:
{
struct ordered_event *first, *last;
struct list_head *head = &oe->events;
first = list_entry(head->next, struct ordered_event, list);
last = oe->last;
/* Warn if we are called before any event got allocated. */
if (WARN_ONCE(!last || list_empty(head), "empty queue"))
return 0;
oe->next_flush = first->timestamp;
oe->next_flush += (last->timestamp - first->timestamp) / 2;
break;
}
case OE_FLUSH__TIME:
oe->next_flush = timestamp;
show_progress = false;
break;
case OE_FLUSH__ROUND:
case OE_FLUSH__NONE:
default:
break;
}
pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
str[how], oe->nr_events);
pr_oe_time(oe->max_timestamp, "max_timestamp\n");
err = do_flush(oe, show_progress);
if (!err) {
if (how == OE_FLUSH__ROUND)
oe->next_flush = oe->max_timestamp;
oe->last_flush_type = how;
}
pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
str[how], oe->nr_events);
pr_oe_time(oe->last_flush, "last_flush\n");
return err;
}
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
return __ordered_events__flush(oe, how, 0);
}
int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}
u64 ordered_events__first_time(struct ordered_events *oe)
{
struct ordered_event *event;
if (list_empty(&oe->events))
return 0;
event = list_first_entry(&oe->events, struct ordered_event, list);
return event->timestamp;
}
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
void *data)
{
INIT_LIST_HEAD(&oe->events);
INIT_LIST_HEAD(&oe->cache);
INIT_LIST_HEAD(&oe->to_free);
oe->max_alloc_size = (u64) -1;
oe->cur_alloc_size = 0;
oe->deliver = deliver;
oe->data = data;
}
static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
unsigned int max, struct ordered_events *oe)
{
if (oe->copy_on_queue) {
unsigned int i;
for (i = 0; i < max; i++)
__free_dup_event(oe, buffer->event[i].event);
}
free(buffer);
}
void ordered_events__free(struct ordered_events *oe)
{
struct ordered_events_buffer *buffer, *tmp;
if (list_empty(&oe->to_free))
return;
/*
* Current buffer might not have all the events allocated
* yet, we need to free only allocated ones ...
*/
if (oe->buffer) {
list_del_init(&oe->buffer->list);
ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
}
/* ... and continue with the rest */
list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
list_del_init(&buffer->list);
ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
}
}
void ordered_events__reinit(struct ordered_events *oe)
{
ordered_events__deliver_t old_deliver = oe->deliver;
ordered_events__free(oe);
memset(oe, '\0', sizeof(*oe));
ordered_events__init(oe, old_deliver, oe->data);
}
| linux-master | tools/perf/util/ordered-events.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <api/io.h>
#include "util/dso.h"
#include "util/debug.h"
#include "util/callchain.h"
#include "util/symbol_conf.h"
#include "srcline.h"
#include "string2.h"
#include "symbol.h"
#include "subcmd/run-command.h"
/* If addr2line doesn't return data for 1 second then timeout. */
int addr2line_timeout_ms = 1 * 1000;
bool srcline_full_filename;
char *srcline__unknown = (char *)"??:0";
static const char *dso__name(struct dso *dso)
{
const char *dso_name;
if (dso->symsrc_filename)
dso_name = dso->symsrc_filename;
else
dso_name = dso->long_name;
if (dso_name[0] == '[')
return NULL;
if (!strncmp(dso_name, "/tmp/perf-", 10))
return NULL;
return dso_name;
}
static int inline_list__append(struct symbol *symbol, char *srcline,
struct inline_node *node)
{
struct inline_list *ilist;
ilist = zalloc(sizeof(*ilist));
if (ilist == NULL)
return -1;
ilist->symbol = symbol;
ilist->srcline = srcline;
if (callchain_param.order == ORDER_CALLEE)
list_add_tail(&ilist->list, &node->val);
else
list_add(&ilist->list, &node->val);
return 0;
}
/* basename version that takes a const input string */
static const char *gnu_basename(const char *path)
{
const char *base = strrchr(path, '/');
return base ? base + 1 : path;
}
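/*
 * Build the "file:line" string, using only the basename of the file unless
 * srcline_full_filename is set. Returns a newly allocated string or NULL.
 */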
static char *srcline_from_fileline(const char *file, unsigned int line)
{
char *srcline;
if (!file)
return NULL;
if (!srcline_full_filename)
file = gnu_basename(file);
if (asprintf(&srcline, "%s:%u", file, line) < 0)
return NULL;
return srcline;
}
static struct symbol *new_inline_sym(struct dso *dso,
struct symbol *base_sym,
const char *funcname)
{
struct symbol *inline_sym;
char *demangled = NULL;
if (!funcname)
funcname = "??";
if (dso) {
demangled = dso__demangle_sym(dso, 0, funcname);
if (demangled)
funcname = demangled;
}
if (base_sym && strcmp(funcname, base_sym->name) == 0) {
/* reuse the real, existing symbol */
inline_sym = base_sym;
/* ensure that we don't alias an inlined symbol, which could
* lead to double frees in inline_node__delete
*/
assert(!base_sym->inlined);
} else {
/* create a fake symbol for the inline frame */
inline_sym = symbol__new(base_sym ? base_sym->start : 0,
base_sym ? (base_sym->end - base_sym->start) : 0,
base_sym ? base_sym->binding : 0,
base_sym ? base_sym->type : 0,
funcname);
if (inline_sym)
inline_sym->inlined = 1;
}
free(demangled);
return inline_sym;
}
#define MAX_INLINE_NEST 1024
#ifdef HAVE_LIBBFD_SUPPORT
/*
* Implement addr2line using libbfd.
*/
#define PACKAGE "perf"
#include <bfd.h>
struct a2l_data {
const char *input;
u64 addr;
bool found;
const char *filename;
const char *funcname;
unsigned line;
bfd *abfd;
asymbol **syms;
};
static int bfd_error(const char *string)
{
const char *errmsg;
errmsg = bfd_errmsg(bfd_get_error());
fflush(stdout);
if (string)
pr_debug("%s: %s\n", string, errmsg);
else
pr_debug("%s\n", errmsg);
return -1;
}
static int slurp_symtab(bfd *abfd, struct a2l_data *a2l)
{
long storage;
long symcount;
asymbol **syms;
bfd_boolean dynamic = FALSE;
if ((bfd_get_file_flags(abfd) & HAS_SYMS) == 0)
return bfd_error(bfd_get_filename(abfd));
storage = bfd_get_symtab_upper_bound(abfd);
if (storage == 0L) {
storage = bfd_get_dynamic_symtab_upper_bound(abfd);
dynamic = TRUE;
}
if (storage < 0L)
return bfd_error(bfd_get_filename(abfd));
syms = malloc(storage);
if (dynamic)
symcount = bfd_canonicalize_dynamic_symtab(abfd, syms);
else
symcount = bfd_canonicalize_symtab(abfd, syms);
if (symcount < 0) {
free(syms);
return bfd_error(bfd_get_filename(abfd));
}
a2l->syms = syms;
return 0;
}
static void find_address_in_section(bfd *abfd, asection *section, void *data)
{
bfd_vma pc, vma;
bfd_size_type size;
struct a2l_data *a2l = data;
flagword flags;
if (a2l->found)
return;
#ifdef bfd_get_section_flags
flags = bfd_get_section_flags(abfd, section);
#else
flags = bfd_section_flags(section);
#endif
if ((flags & SEC_ALLOC) == 0)
return;
pc = a2l->addr;
#ifdef bfd_get_section_vma
vma = bfd_get_section_vma(abfd, section);
#else
vma = bfd_section_vma(section);
#endif
#ifdef bfd_get_section_size
size = bfd_get_section_size(section);
#else
size = bfd_section_size(section);
#endif
if (pc < vma || pc >= vma + size)
return;
a2l->found = bfd_find_nearest_line(abfd, section, a2l->syms, pc - vma,
&a2l->filename, &a2l->funcname,
&a2l->line);
if (a2l->filename && !strlen(a2l->filename))
a2l->filename = NULL;
}
static struct a2l_data *addr2line_init(const char *path)
{
bfd *abfd;
struct a2l_data *a2l = NULL;
abfd = bfd_openr(path, NULL);
if (abfd == NULL)
return NULL;
if (!bfd_check_format(abfd, bfd_object))
goto out;
a2l = zalloc(sizeof(*a2l));
if (a2l == NULL)
goto out;
a2l->abfd = abfd;
a2l->input = strdup(path);
if (a2l->input == NULL)
goto out;
if (slurp_symtab(abfd, a2l))
goto out;
return a2l;
out:
if (a2l) {
zfree((char **)&a2l->input);
free(a2l);
}
bfd_close(abfd);
return NULL;
}
static void addr2line_cleanup(struct a2l_data *a2l)
{
if (a2l->abfd)
bfd_close(a2l->abfd);
zfree((char **)&a2l->input);
zfree(&a2l->syms);
free(a2l);
}
static int inline_list__append_dso_a2l(struct dso *dso,
struct inline_node *node,
struct symbol *sym)
{
struct a2l_data *a2l = dso->a2l;
struct symbol *inline_sym = new_inline_sym(dso, sym, a2l->funcname);
char *srcline = NULL;
if (a2l->filename)
srcline = srcline_from_fileline(a2l->filename, a2l->line);
return inline_list__append(inline_sym, srcline, node);
}
static int addr2line(const char *dso_name, u64 addr,
char **file, unsigned int *line, struct dso *dso,
bool unwind_inlines, struct inline_node *node,
struct symbol *sym)
{
int ret = 0;
struct a2l_data *a2l = dso->a2l;
if (!a2l) {
dso->a2l = addr2line_init(dso_name);
a2l = dso->a2l;
}
if (a2l == NULL) {
if (!symbol_conf.disable_add2line_warn)
pr_warning("addr2line_init failed for %s\n", dso_name);
return 0;
}
a2l->addr = addr;
a2l->found = false;
bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
if (!a2l->found)
return 0;
if (unwind_inlines) {
int cnt = 0;
if (node && inline_list__append_dso_a2l(dso, node, sym))
return 0;
while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
&a2l->funcname, &a2l->line) &&
cnt++ < MAX_INLINE_NEST) {
if (a2l->filename && !strlen(a2l->filename))
a2l->filename = NULL;
if (node != NULL) {
if (inline_list__append_dso_a2l(dso, node, sym))
return 0;
// found at least one inline frame
ret = 1;
}
}
}
if (file) {
*file = a2l->filename ? strdup(a2l->filename) : NULL;
ret = *file ? 1 : 0;
}
if (line)
*line = a2l->line;
return ret;
}
void dso__free_a2l(struct dso *dso)
{
struct a2l_data *a2l = dso->a2l;
if (!a2l)
return;
addr2line_cleanup(a2l);
dso->a2l = NULL;
}
#else /* HAVE_LIBBFD_SUPPORT */
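/*
 * Fallback implementation that drives an external addr2line process.
 *
 * filename_split() terminates a "file:line" line from addr2line at the ':'
 * and parses the line number; it returns 0 for the unknown "??:0" sentinel
 * or when no ':' is present.
 */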
static int filename_split(char *filename, unsigned int *line_nr)
{
char *sep;
sep = strchr(filename, '\n');
if (sep)
*sep = '\0';
if (!strcmp(filename, "??:0"))
return 0;
sep = strchr(filename, ':');
if (sep) {
*sep++ = '\0';
*line_nr = strtoul(sep, NULL, 0);
return 1;
}
pr_debug("addr2line missing ':' in filename split\n");
return 0;
}
static void addr2line_subprocess_cleanup(struct child_process *a2l)
{
if (a2l->pid != -1) {
kill(a2l->pid, SIGKILL);
finish_command(a2l); /* ignore result, we don't care */
a2l->pid = -1;
}
free(a2l);
}
static struct child_process *addr2line_subprocess_init(const char *addr2line_path,
const char *binary_path)
{
const char *argv[] = {
addr2line_path ?: "addr2line",
"-e", binary_path,
"-a", "-i", "-f", NULL
};
struct child_process *a2l = zalloc(sizeof(*a2l));
int start_command_status = 0;
if (a2l == NULL) {
pr_err("Failed to allocate memory for addr2line");
return NULL;
}
a2l->pid = -1;
a2l->in = -1;
a2l->out = -1;
a2l->no_stderr = 1;
a2l->argv = argv;
start_command_status = start_command(a2l);
a2l->argv = NULL; /* it's not used after start_command; avoid dangling pointers */
if (start_command_status != 0) {
pr_warning("could not start addr2line (%s) for %s: start_command return code %d\n",
addr2line_path, binary_path, start_command_status);
addr2line_subprocess_cleanup(a2l);
return NULL;
}
return a2l;
}
enum a2l_style {
BROKEN,
GNU_BINUTILS,
LLVM,
};
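/*
 * Probe which flavour of addr2line we are talking to by sending a bogus ","
 * query once: llvm-addr2line echoes the ',' back, while GNU binutils prints
 * a 0x0 address record. The detected style is cached for the process
 * lifetime.
 */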
static enum a2l_style addr2line_configure(struct child_process *a2l, const char *dso_name)
{
static bool cached;
static enum a2l_style style;
if (!cached) {
char buf[128];
struct io io;
int ch;
int lines;
if (write(a2l->in, ",\n", 2) != 2)
return BROKEN;
io__init(&io, a2l->out, buf, sizeof(buf));
ch = io__get_char(&io);
if (ch == ',') {
style = LLVM;
cached = true;
lines = 1;
pr_debug("Detected LLVM addr2line style\n");
} else if (ch == '0') {
style = GNU_BINUTILS;
cached = true;
lines = 3;
pr_debug("Detected binutils addr2line style\n");
} else {
if (!symbol_conf.disable_add2line_warn) {
char *output = NULL;
size_t output_len;
io__getline(&io, &output, &output_len);
pr_warning("%s %s: addr2line configuration failed\n",
__func__, dso_name);
pr_warning("\t%c%s", ch, output);
}
pr_debug("Unknown/broken addr2line style\n");
return BROKEN;
}
while (lines) {
ch = io__get_char(&io);
if (ch <= 0)
break;
if (ch == '\n')
lines--;
}
/* Ignore SIGPIPE in the event addr2line exits. */
signal(SIGPIPE, SIG_IGN);
}
return style;
}
static int read_addr2line_record(struct io *io,
enum a2l_style style,
const char *dso_name,
u64 addr,
bool first,
char **function,
char **filename,
unsigned int *line_nr)
{
/*
* Returns:
* -1 ==> error
* 0 ==> sentinel (or other ill-formed) record read
* 1 ==> a genuine record read
*/
char *line = NULL;
size_t line_len = 0;
unsigned int dummy_line_nr = 0;
int ret = -1;
if (function != NULL)
zfree(function);
if (filename != NULL)
zfree(filename);
if (line_nr != NULL)
*line_nr = 0;
	/*
	 * Read the first line. Without an error this will be:
	 * - for the first record, an address like 0x1234,
	 * - the binutils sentinel 0x0000000000000000,
	 * - for llvm-addr2line, the sentinel ',' character,
	 * - the function name line for an inlined function.
	 */
if (io__getline(io, &line, &line_len) < 0 || !line_len)
goto error;
pr_debug("%s %s: addr2line read address for sentinel: %s", __func__, dso_name, line);
if (style == LLVM && line_len == 2 && line[0] == ',') {
/* Found the llvm-addr2line sentinel character. */
zfree(&line);
return 0;
} else if (style == GNU_BINUTILS && (!first || addr != 0)) {
int zero_count = 0, non_zero_count = 0;
/*
* Check for binutils sentinel ignoring it for the case the
* requested address is 0.
*/
		/* A given address should always start with 0x. */
		if (line_len >= 2 && line[0] == '0' && line[1] == 'x') {
for (size_t i = 2; i < line_len; i++) {
if (line[i] == '0')
zero_count++;
else if (line[i] != '\n')
non_zero_count++;
}
if (!non_zero_count) {
int ch;
if (first && !zero_count) {
/* Line was erroneous just '0x'. */
goto error;
}
/*
* Line was 0x0..0, the sentinel for binutils. Remove
* the function and filename lines.
*/
zfree(&line);
do {
ch = io__get_char(io);
} while (ch > 0 && ch != '\n');
do {
ch = io__get_char(io);
} while (ch > 0 && ch != '\n');
return 0;
}
}
}
/* Read the second function name line (if inline data then this is the first line). */
if (first && (io__getline(io, &line, &line_len) < 0 || !line_len))
goto error;
pr_debug("%s %s: addr2line read line: %s", __func__, dso_name, line);
if (function != NULL)
*function = strdup(strim(line));
zfree(&line);
line_len = 0;
/* Read the third filename and line number line. */
if (io__getline(io, &line, &line_len) < 0 || !line_len)
goto error;
pr_debug("%s %s: addr2line filename:number : %s", __func__, dso_name, line);
if (filename_split(line, line_nr == NULL ? &dummy_line_nr : line_nr) == 0 &&
style == GNU_BINUTILS) {
ret = 0;
goto error;
}
if (filename != NULL)
*filename = strdup(line);
zfree(&line);
line_len = 0;
return 1;
error:
free(line);
if (function != NULL)
zfree(function);
if (filename != NULL)
zfree(filename);
return ret;
}
static int inline_list__append_record(struct dso *dso,
struct inline_node *node,
struct symbol *sym,
const char *function,
const char *filename,
unsigned int line_nr)
{
struct symbol *inline_sym = new_inline_sym(dso, sym, function);
return inline_list__append(inline_sym, srcline_from_fileline(filename, line_nr), node);
}
static int addr2line(const char *dso_name, u64 addr,
char **file, unsigned int *line_nr,
struct dso *dso,
bool unwind_inlines,
struct inline_node *node,
struct symbol *sym __maybe_unused)
{
struct child_process *a2l = dso->a2l;
char *record_function = NULL;
char *record_filename = NULL;
unsigned int record_line_nr = 0;
int record_status = -1;
int ret = 0;
size_t inline_count = 0;
int len;
char buf[128];
ssize_t written;
struct io io = { .eof = false };
enum a2l_style a2l_style;
if (!a2l) {
if (!filename__has_section(dso_name, ".debug_line"))
goto out;
dso->a2l = addr2line_subprocess_init(symbol_conf.addr2line_path, dso_name);
a2l = dso->a2l;
}
if (a2l == NULL) {
if (!symbol_conf.disable_add2line_warn)
pr_warning("%s %s: addr2line_subprocess_init failed\n", __func__, dso_name);
goto out;
}
a2l_style = addr2line_configure(a2l, dso_name);
if (a2l_style == BROKEN)
goto out;
/*
* Send our request and then *deliberately* send something that can't be
* interpreted as a valid address to ask addr2line about (namely,
* ","). This causes addr2line to first write out the answer to our
* request, in an unbounded/unknown number of records, and then to write
* out the lines "0x0...0", "??" and "??:0", for GNU binutils, or ","
* for llvm-addr2line, so that we can detect when it has finished giving
* us anything useful.
*/
len = snprintf(buf, sizeof(buf), "%016"PRIx64"\n,\n", addr);
written = len > 0 ? write(a2l->in, buf, len) : -1;
if (written != len) {
if (!symbol_conf.disable_add2line_warn)
pr_warning("%s %s: could not send request\n", __func__, dso_name);
goto out;
}
io__init(&io, a2l->out, buf, sizeof(buf));
io.timeout_ms = addr2line_timeout_ms;
switch (read_addr2line_record(&io, a2l_style, dso_name, addr, /*first=*/true,
&record_function, &record_filename, &record_line_nr)) {
case -1:
if (!symbol_conf.disable_add2line_warn)
pr_warning("%s %s: could not read first record\n", __func__, dso_name);
goto out;
case 0:
		/*
		 * The first record was invalid, so return failure, but first
		 * read another record, since we sent a sentinel ',' for the
		 * sake of detecting the last inlined function. Treat this as
		 * the first line of a record, as the ',' generates a new start
		 * with GNU binutils; also force a non-zero address as we're no
		 * longer reading that record.
		 */
switch (read_addr2line_record(&io, a2l_style, dso_name,
/*addr=*/1, /*first=*/true,
NULL, NULL, NULL)) {
case -1:
if (!symbol_conf.disable_add2line_warn)
pr_warning("%s %s: could not read sentinel record\n",
__func__, dso_name);
break;
case 0:
/* The sentinel as expected. */
break;
default:
if (!symbol_conf.disable_add2line_warn)
pr_warning("%s %s: unexpected record instead of sentinel",
__func__, dso_name);
break;
}
goto out;
default:
/* First record as expected. */
break;
}
if (file) {
*file = strdup(record_filename);
ret = 1;
}
if (line_nr)
*line_nr = record_line_nr;
if (unwind_inlines) {
if (node && inline_list__append_record(dso, node, sym,
record_function,
record_filename,
record_line_nr)) {
ret = 0;
goto out;
}
}
	/*
	 * We have to read the remaining records even if we don't care about
	 * the inline info: these are no longer the first record, so pass
	 * first=false and a non-zero address while draining them.
	 */
while ((record_status = read_addr2line_record(&io,
a2l_style,
dso_name,
/*addr=*/1,
/*first=*/false,
&record_function,
&record_filename,
&record_line_nr)) == 1) {
if (unwind_inlines && node && inline_count++ < MAX_INLINE_NEST) {
if (inline_list__append_record(dso, node, sym,
record_function,
record_filename,
record_line_nr)) {
ret = 0;
goto out;
}
ret = 1; /* found at least one inline frame */
}
}
out:
free(record_function);
free(record_filename);
if (io.eof) {
dso->a2l = NULL;
addr2line_subprocess_cleanup(a2l);
}
return ret;
}
void dso__free_a2l(struct dso *dso)
{
struct child_process *a2l = dso->a2l;
if (!a2l)
return;
addr2line_subprocess_cleanup(a2l);
dso->a2l = NULL;
}
#endif /* HAVE_LIBBFD_SUPPORT */
static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
struct dso *dso, struct symbol *sym)
{
struct inline_node *node;
node = zalloc(sizeof(*node));
if (node == NULL) {
perror("not enough memory for the inline node");
return NULL;
}
INIT_LIST_HEAD(&node->val);
node->addr = addr;
addr2line(dso_name, addr, NULL, NULL, dso, true, node, sym);
return node;
}
/*
* Number of addr2line failures (without success) before disabling it for that
* dso.
*/
#define A2L_FAIL_LIMIT 123
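/*
 * Resolve addr within dso to a "file:line" string. On failure fall back to
 * "sym+offset" or "dso[addr]" depending on show_sym/show_addr, and after
 * repeated failures without a success stop using addr2line for this dso.
 */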
char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
bool show_sym, bool show_addr, bool unwind_inlines,
u64 ip)
{
char *file = NULL;
unsigned line = 0;
char *srcline;
const char *dso_name;
if (!dso->has_srcline)
goto out;
dso_name = dso__name(dso);
if (dso_name == NULL)
goto out;
if (!addr2line(dso_name, addr, &file, &line, dso,
unwind_inlines, NULL, sym))
goto out;
srcline = srcline_from_fileline(file, line);
free(file);
if (!srcline)
goto out;
dso->a2l_fails = 0;
return srcline;
out:
if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) {
dso->has_srcline = 0;
dso__free_a2l(dso);
}
if (!show_addr)
return (show_sym && sym) ?
strndup(sym->name, sym->namelen) : SRCLINE_UNKNOWN;
if (sym) {
if (asprintf(&srcline, "%s+%" PRIu64, show_sym ? sym->name : "",
ip - sym->start) < 0)
return SRCLINE_UNKNOWN;
} else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso->short_name, addr) < 0)
return SRCLINE_UNKNOWN;
return srcline;
}
/* Returns filename and fills in line number in line */
char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line)
{
char *file = NULL;
const char *dso_name;
if (!dso->has_srcline)
goto out;
dso_name = dso__name(dso);
if (dso_name == NULL)
goto out;
if (!addr2line(dso_name, addr, &file, line, dso, true, NULL, NULL))
goto out;
dso->a2l_fails = 0;
return file;
out:
if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) {
dso->has_srcline = 0;
dso__free_a2l(dso);
}
return NULL;
}
void zfree_srcline(char **srcline)
{
if (*srcline == NULL)
return;
if (*srcline != SRCLINE_UNKNOWN)
free(*srcline);
*srcline = NULL;
}
char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
bool show_sym, bool show_addr, u64 ip)
{
return __get_srcline(dso, addr, sym, show_sym, show_addr, false, ip);
}
struct srcline_node {
u64 addr;
char *srcline;
struct rb_node rb_node;
};
void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline)
{
struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
struct srcline_node *i, *node;
bool leftmost = true;
node = zalloc(sizeof(struct srcline_node));
if (!node) {
perror("not enough memory for the srcline node");
return;
}
node->addr = addr;
node->srcline = srcline;
while (*p != NULL) {
parent = *p;
i = rb_entry(parent, struct srcline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&node->rb_node, parent, p);
rb_insert_color_cached(&node->rb_node, tree, leftmost);
}
char *srcline__tree_find(struct rb_root_cached *tree, u64 addr)
{
struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct srcline_node *i = rb_entry(n, struct srcline_node,
rb_node);
if (addr < i->addr)
n = n->rb_left;
else if (addr > i->addr)
n = n->rb_right;
else
return i->srcline;
}
return NULL;
}
void srcline__tree_delete(struct rb_root_cached *tree)
{
struct srcline_node *pos;
struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct srcline_node, rb_node);
next = rb_next(&pos->rb_node);
rb_erase_cached(&pos->rb_node, tree);
zfree_srcline(&pos->srcline);
zfree(&pos);
}
}
struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
struct symbol *sym)
{
const char *dso_name;
dso_name = dso__name(dso);
if (dso_name == NULL)
return NULL;
return addr2inlines(dso_name, addr, dso, sym);
}
void inline_node__delete(struct inline_node *node)
{
struct inline_list *ilist, *tmp;
list_for_each_entry_safe(ilist, tmp, &node->val, list) {
list_del_init(&ilist->list);
zfree_srcline(&ilist->srcline);
/* only the inlined symbols are owned by the list */
if (ilist->symbol && ilist->symbol->inlined)
symbol__delete(ilist->symbol);
free(ilist);
}
free(node);
}
void inlines__tree_insert(struct rb_root_cached *tree,
struct inline_node *inlines)
{
struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 addr = inlines->addr;
struct inline_node *i;
bool leftmost = true;
while (*p != NULL) {
parent = *p;
i = rb_entry(parent, struct inline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&inlines->rb_node, parent, p);
rb_insert_color_cached(&inlines->rb_node, tree, leftmost);
}
struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr)
{
struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct inline_node *i = rb_entry(n, struct inline_node,
rb_node);
if (addr < i->addr)
n = n->rb_left;
else if (addr > i->addr)
n = n->rb_right;
else
return i;
}
return NULL;
}
void inlines__tree_delete(struct rb_root_cached *tree)
{
struct inline_node *pos;
struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct inline_node, rb_node);
next = rb_next(&pos->rb_node);
rb_erase_cached(&pos->rb_node, tree);
inline_node__delete(pos);
}
}
| linux-master | tools/perf/util/srcline.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*
* Parts came from builtin-annotate.c, see those files for further
* copyright notes.
*/
#include <errno.h>
#include <inttypes.h>
#include <libgen.h>
#include <stdlib.h>
#include "util.h" // hex_width()
#include "ui/ui.h"
#include "sort.h"
#include "build-id.h"
#include "color.h"
#include "config.h"
#include "dso.h"
#include "env.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "srcline.h"
#include "units.h"
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
#include "evlist.h"
#include "bpf-event.h"
#include "bpf-utils.h"
#include "block-range.h"
#include "string2.h"
#include "util/event.h"
#include "util/sharded_mutex.h"
#include "arch/common.h"
#include "namespaces.h"
#include <regex.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/parse-options.h>
#include <subcmd/run-command.h>
/* FIXME: For the HE_COLORSET */
#include "ui/browser.h"
/*
* FIXME: Using the same values as slang.h,
* but that header may not be available everywhere
*/
#define LARROW_CHAR ((unsigned char)',')
#define RARROW_CHAR ((unsigned char)'+')
#define DARROW_CHAR ((unsigned char)'.')
#define UARROW_CHAR ((unsigned char)'-')
#include <linux/ctype.h>
static regex_t file_lineno;
static struct ins_ops *ins__find(struct arch *arch, const char *name);
static void ins__sort(struct arch *arch);
static int disasm_line__parse(char *line, const char **namep, char **rawp);
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name);
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name);
struct arch {
const char *name;
struct ins *instructions;
size_t nr_instructions;
size_t nr_instructions_allocated;
struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name);
bool sorted_instructions;
bool initialized;
const char *insn_suffix;
void *priv;
unsigned int model;
unsigned int family;
int (*init)(struct arch *arch, char *cpuid);
bool (*ins_is_fused)(struct arch *arch, const char *ins1,
const char *ins2);
struct {
char comment_char;
char skip_functions_char;
} objdump;
};
static struct ins_ops call_ops;
static struct ins_ops dec_ops;
static struct ins_ops jump_ops;
static struct ins_ops mov_ops;
static struct ins_ops nop_ops;
static struct ins_ops lock_ops;
static struct ins_ops ret_ops;
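/*
 * Grow the per-arch instruction table in 128-entry steps. The first time an
 * arch that starts out with a statically allocated table (e.g. x86) needs to
 * grow, the static table is copied into a heap allocation.
 */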
static int arch__grow_instructions(struct arch *arch)
{
struct ins *new_instructions;
size_t new_nr_allocated;
if (arch->nr_instructions_allocated == 0 && arch->instructions)
goto grow_from_non_allocated_table;
new_nr_allocated = arch->nr_instructions_allocated + 128;
new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
if (new_instructions == NULL)
return -1;
out_update_instructions:
arch->instructions = new_instructions;
arch->nr_instructions_allocated = new_nr_allocated;
return 0;
grow_from_non_allocated_table:
new_nr_allocated = arch->nr_instructions + 128;
new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
if (new_instructions == NULL)
return -1;
	/* copy the whole existing table, not just nr_instructions bytes */
	memcpy(new_instructions, arch->instructions,
	       arch->nr_instructions * sizeof(struct ins));
goto out_update_instructions;
}
static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
{
struct ins *ins;
if (arch->nr_instructions == arch->nr_instructions_allocated &&
arch__grow_instructions(arch))
return -1;
ins = &arch->instructions[arch->nr_instructions];
ins->name = strdup(name);
if (!ins->name)
return -1;
ins->ops = ops;
arch->nr_instructions++;
ins__sort(arch);
return 0;
}
#include "arch/arc/annotate/instructions.c"
#include "arch/arm/annotate/instructions.c"
#include "arch/arm64/annotate/instructions.c"
#include "arch/csky/annotate/instructions.c"
#include "arch/loongarch/annotate/instructions.c"
#include "arch/mips/annotate/instructions.c"
#include "arch/x86/annotate/instructions.c"
#include "arch/powerpc/annotate/instructions.c"
#include "arch/riscv64/annotate/instructions.c"
#include "arch/s390/annotate/instructions.c"
#include "arch/sparc/annotate/instructions.c"
static struct arch architectures[] = {
{
.name = "arc",
.init = arc__annotate_init,
},
{
.name = "arm",
.init = arm__annotate_init,
},
{
.name = "arm64",
.init = arm64__annotate_init,
},
{
.name = "csky",
.init = csky__annotate_init,
},
{
.name = "mips",
.init = mips__annotate_init,
.objdump = {
.comment_char = '#',
},
},
{
.name = "x86",
.init = x86__annotate_init,
.instructions = x86__instructions,
.nr_instructions = ARRAY_SIZE(x86__instructions),
.insn_suffix = "bwlq",
.objdump = {
.comment_char = '#',
},
},
{
.name = "powerpc",
.init = powerpc__annotate_init,
},
{
.name = "riscv64",
.init = riscv64__annotate_init,
},
{
.name = "s390",
.init = s390__annotate_init,
.objdump = {
.comment_char = '#',
},
},
{
.name = "sparc",
.init = sparc__annotate_init,
.objdump = {
.comment_char = '#',
},
},
{
.name = "loongarch",
.init = loongarch__annotate_init,
.objdump = {
.comment_char = '#',
},
},
};
static void ins__delete(struct ins_operands *ops)
{
if (ops == NULL)
return;
zfree(&ops->source.raw);
zfree(&ops->source.name);
zfree(&ops->target.raw);
zfree(&ops->target.name);
}
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name)
{
return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
}
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name)
{
if (ins->ops->scnprintf)
return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
}
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
{
if (!arch || !arch->ins_is_fused)
return false;
return arch->ins_is_fused(arch, ins1, ins2);
}
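/*
 * Parse the operand of a call as emitted by objdump: a target address
 * optionally followed by "<symbol+offset>". Record the address and symbol
 * name and resolve the target symbol via maps__find_ams(); indirect calls
 * through a register plus offset (e.g. callq *0x8(%rbx)) get no target
 * address recorded.
 */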
static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
{
char *endptr, *tok, *name;
struct map *map = ms->map;
struct addr_map_symbol target = {
.ms = { .map = map, },
};
ops->target.addr = strtoull(ops->raw, &endptr, 16);
name = strchr(endptr, '<');
if (name == NULL)
goto indirect_call;
name++;
if (arch->objdump.skip_functions_char &&
strchr(name, arch->objdump.skip_functions_char))
return -1;
tok = strchr(name, '>');
if (tok == NULL)
return -1;
*tok = '\0';
ops->target.name = strdup(name);
*tok = '>';
if (ops->target.name == NULL)
return -1;
find_target:
target.addr = map__objdump_2mem(map, ops->target.addr);
if (maps__find_ams(ms->maps, &target) == 0 &&
map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
ops->target.sym = target.ms.sym;
return 0;
indirect_call:
tok = strchr(endptr, '*');
if (tok != NULL) {
endptr++;
		/* Indirect call can use a non-rip register and offset: callq *0x8(%rbx).
		 * Do not parse such instructions. */
if (strstr(endptr, "(%r") == NULL)
ops->target.addr = strtoull(endptr, NULL, 16);
}
goto find_target;
}
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name)
{
if (ops->target.sym)
return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
if (ops->target.addr == 0)
return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
if (ops->target.name)
return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name);
return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr);
}
static struct ins_ops call_ops = {
.parse = call__parse,
.scnprintf = call__scnprintf,
};
bool ins__is_call(const struct ins *ins)
{
return ins->ops == &call_ops || ins->ops == &s390_call_ops || ins->ops == &loongarch_call_ops;
}
/*
* Prevents from matching commas in the comment section, e.g.:
* ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast
*
* and skip comma as part of function arguments, e.g.:
* 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc>
*/
static inline const char *validate_comma(const char *c, struct ins_operands *ops)
{
if (ops->raw_comment && c > ops->raw_comment)
return NULL;
if (ops->raw_func_start && c > ops->raw_func_start)
return NULL;
return c;
}
static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
{
struct map *map = ms->map;
struct symbol *sym = ms->sym;
struct addr_map_symbol target = {
.ms = { .map = map, },
};
const char *c = strchr(ops->raw, ',');
u64 start, end;
ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
ops->raw_func_start = strchr(ops->raw, '<');
c = validate_comma(c, ops);
/*
* Examples of lines to parse for the _cpp_lex_token@@Base
* function:
*
* 1159e6c: jne 115aa32 <_cpp_lex_token@@Base+0xf92>
* 1159e8b: jne c469be <cpp_named_operator2name@@Base+0xa72>
*
* The first is a jump to an offset inside the same function,
* the second is to another function, i.e. that 0xa72 is an
* offset in the cpp_named_operator2name@@base function.
*/
/*
* skip over possible up to 2 operands to get to address, e.g.:
* tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
*/
if (c++ != NULL) {
ops->target.addr = strtoull(c, NULL, 16);
if (!ops->target.addr) {
c = strchr(c, ',');
c = validate_comma(c, ops);
if (c++ != NULL)
ops->target.addr = strtoull(c, NULL, 16);
}
} else {
ops->target.addr = strtoull(ops->raw, NULL, 16);
}
target.addr = map__objdump_2mem(map, ops->target.addr);
start = map__unmap_ip(map, sym->start);
end = map__unmap_ip(map, sym->end);
ops->target.outside = target.addr < start || target.addr > end;
/*
* FIXME: things like this in _cpp_lex_token (gcc's cc1 program):
cpp_named_operator2name@@Base+0xa72
* Point to a place that is after the cpp_named_operator2name
* boundaries, i.e. in the ELF symbol table for cc1
* cpp_named_operator2name is marked as being 32-bytes long, but it in
* fact is much larger than that, so we seem to need a symbols__find()
* routine that looks for >= current->start and < next_symbol->start,
* possibly just for C++ objects?
*
* For now let's just make some progress by marking jumps to outside the
* current function as call-like.
*
* Actual navigation will come next, with further understanding of how
* the symbol searching and disassembly should be done.
*/
if (maps__find_ams(ms->maps, &target) == 0 &&
map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
ops->target.sym = target.ms.sym;
if (!ops->target.outside) {
ops->target.offset = target.addr - start;
ops->target.offset_avail = true;
} else {
ops->target.offset_avail = false;
}
return 0;
}
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name)
{
const char *c;
if (!ops->target.addr || ops->target.offset < 0)
return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
if (ops->target.outside && ops->target.sym != NULL)
return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
c = strchr(ops->raw, ',');
c = validate_comma(c, ops);
if (c != NULL) {
const char *c2 = strchr(c + 1, ',');
c2 = validate_comma(c2, ops);
/* check for 3-op insn */
if (c2 != NULL)
c = c2;
c++;
/* mirror arch objdump's space-after-comma style */
if (*c == ' ')
c++;
}
return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name,
ins->name, c ? c - ops->raw : 0, ops->raw,
ops->target.offset);
}
static struct ins_ops jump_ops = {
.parse = jump__parse,
.scnprintf = jump__scnprintf,
};
bool ins__is_jump(const struct ins *ins)
{
return ins->ops == &jump_ops || ins->ops == &loongarch_jump_ops;
}
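/*
* comment__symbol() resolves a RIP-relative operand from the objdump
* comment, e.g. for "0x2f5ad5(%rip)" with the comment "70afe0
* <_DYNAMIC+0x2f8>" it stores addr 0x70afe0 and name "_DYNAMIC+0x2f8".
*/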
static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
{
char *endptr, *name, *t;
if (strstr(raw, "(%rip)") == NULL)
return 0;
*addrp = strtoull(comment, &endptr, 16);
if (endptr == comment)
return 0;
name = strchr(endptr, '<');
if (name == NULL)
return -1;
name++;
t = strchr(name, '>');
if (t == NULL)
return 0;
*t = '\0';
*namep = strdup(name);
*t = '>';
return 0;
}
static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
{
ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
if (ops->locked.ops == NULL)
return 0;
if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
goto out_free_ops;
ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
if (ops->locked.ins.ops == NULL)
goto out_free_ops;
if (ops->locked.ins.ops->parse &&
ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0)
goto out_free_ops;
return 0;
out_free_ops:
zfree(&ops->locked.ops);
return 0;
}
static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name)
{
int printed;
if (ops->locked.ins.ops == NULL)
return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name);
return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
size - printed, ops->locked.ops, max_ins_name);
}
static void lock__delete(struct ins_operands *ops)
{
struct ins *ins = &ops->locked.ins;
if (ins->ops && ins->ops->free)
ins->ops->free(ops->locked.ops);
else
ins__delete(ops->locked.ops);
zfree(&ops->locked.ops);
zfree(&ops->target.raw);
zfree(&ops->target.name);
}
static struct ins_ops lock_ops = {
.free = lock__delete,
.parse = lock__parse,
.scnprintf = lock__scnprintf,
};
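/*
* mov__parse() splits the raw operands at the comma into source and
* target, e.g. "0x2f5ad5(%rip),%rax" becomes source "0x2f5ad5(%rip)"
* and target "%rax"; an objdump comment, if present, is used to
* resolve either side to an address and symbol name.
*/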
static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
{
char *s = strchr(ops->raw, ','), *target, *comment, prev;
if (s == NULL)
return -1;
*s = '\0';
/*
* x86 SIB addressing has something like 0x8(%rax, %rcx, 1)
* then it needs to have the closing parenthesis.
*/
if (strchr(ops->raw, '(')) {
*s = ',';
s = strchr(ops->raw, ')');
if (s == NULL || s[1] != ',')
return -1;
*++s = '\0';
}
ops->source.raw = strdup(ops->raw);
*s = ',';
if (ops->source.raw == NULL)
return -1;
target = skip_spaces(++s);
comment = strchr(s, arch->objdump.comment_char);
if (comment != NULL)
s = comment - 1;
else
s = strchr(s, '\0') - 1;
while (s > target && isspace(s[0]))
--s;
s++;
prev = *s;
*s = '\0';
ops->target.raw = strdup(target);
*s = prev;
if (ops->target.raw == NULL)
goto out_free_source;
if (comment == NULL)
return 0;
comment = skip_spaces(comment);
comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
return 0;
out_free_source:
zfree(&ops->source.raw);
return -1;
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name)
{
return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
ops->source.name ?: ops->source.raw,
ops->target.name ?: ops->target.raw);
}
static struct ins_ops mov_ops = {
.parse = mov__parse,
.scnprintf = mov__scnprintf,
};
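/*
* dec__parse() handles single-operand instructions like inc/dec: the
* target is the first whitespace-delimited token of the raw operands,
* optionally resolved to a symbol via the objdump comment.
*/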
static int dec__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
{
char *target, *comment, *s, prev;
target = s = ops->raw;
while (s[0] != '\0' && !isspace(s[0]))
++s;
prev = *s;
*s = '\0';
ops->target.raw = strdup(target);
*s = prev;
if (ops->target.raw == NULL)
return -1;
comment = strchr(s, arch->objdump.comment_char);
if (comment == NULL)
return 0;
comment = skip_spaces(comment);
comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
return 0;
}
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name)
{
return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
ops->target.name ?: ops->target.raw);
}
static struct ins_ops dec_ops = {
.parse = dec__parse,
.scnprintf = dec__scnprintf,
};
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
struct ins_operands *ops __maybe_unused, int max_ins_name)
{
return scnprintf(bf, size, "%-*s", max_ins_name, "nop");
}
static struct ins_ops nop_ops = {
.scnprintf = nop__scnprintf,
};
static struct ins_ops ret_ops = {
.scnprintf = ins__raw_scnprintf,
};
bool ins__is_ret(const struct ins *ins)
{
return ins->ops == &ret_ops;
}
bool ins__is_lock(const struct ins *ins)
{
return ins->ops == &lock_ops;
}
static int ins__key_cmp(const void *name, const void *insp)
{
const struct ins *ins = insp;
return strcmp(name, ins->name);
}
static int ins__cmp(const void *a, const void *b)
{
const struct ins *ia = a;
const struct ins *ib = b;
return strcmp(ia->name, ib->name);
}
static void ins__sort(struct arch *arch)
{
const int nmemb = arch->nr_instructions;
qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
}
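/*
* Look up an instruction by name; if that fails and the architecture
* defines instruction suffixes (e.g. the AT&T size suffixes on x86,
* such as the 'q' in "addq"), strip a trailing suffix character and
* retry the lookup.
*/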
static struct ins_ops *__ins__find(struct arch *arch, const char *name)
{
struct ins *ins;
const int nmemb = arch->nr_instructions;
if (!arch->sorted_instructions) {
ins__sort(arch);
arch->sorted_instructions = true;
}
ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
if (ins)
return ins->ops;
if (arch->insn_suffix) {
char tmp[32];
char suffix;
size_t len = strlen(name);
if (len == 0 || len >= sizeof(tmp))
return NULL;
suffix = name[len - 1];
if (strchr(arch->insn_suffix, suffix) == NULL)
return NULL;
strcpy(tmp, name);
tmp[len - 1] = '\0'; /* remove the suffix and check again */
ins = bsearch(tmp, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
}
return ins ? ins->ops : NULL;
}
static struct ins_ops *ins__find(struct arch *arch, const char *name)
{
struct ins_ops *ops = __ins__find(arch, name);
if (!ops && arch->associate_instruction_ops)
ops = arch->associate_instruction_ops(arch, name);
return ops;
}
static int arch__key_cmp(const void *name, const void *archp)
{
const struct arch *arch = archp;
return strcmp(name, arch->name);
}
static int arch__cmp(const void *a, const void *b)
{
const struct arch *aa = a;
const struct arch *ab = b;
return strcmp(aa->name, ab->name);
}
static void arch__sort(void)
{
const int nmemb = ARRAY_SIZE(architectures);
qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
}
static struct arch *arch__find(const char *name)
{
const int nmemb = ARRAY_SIZE(architectures);
static bool sorted;
if (!sorted) {
arch__sort();
sorted = true;
}
return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}
static struct annotated_source *annotated_source__new(void)
{
struct annotated_source *src = zalloc(sizeof(*src));
if (src != NULL)
INIT_LIST_HEAD(&src->source);
return src;
}
static __maybe_unused void annotated_source__delete(struct annotated_source *src)
{
if (src == NULL)
return;
zfree(&src->histograms);
zfree(&src->cycles_hist);
free(src);
}
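/*
* The sample histograms are laid out contiguously: nr_hists blocks of
* sizeof_sym_hist bytes, one per event, each holding a struct sym_hist
* header followed by one sym_hist_entry per byte of the symbol.
*/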
static int annotated_source__alloc_histograms(struct annotated_source *src,
size_t size, int nr_hists)
{
size_t sizeof_sym_hist;
/*
* Add a buffer of one element for zero-length symbols.
* When a sample is taken from the first instruction of a
* zero-length symbol, perf still resolves it, shows the
* symbol name in perf report and allows it to be
* annotated.
*/
if (size == 0)
size = 1;
/* Check for overflow when calculating sizeof_sym_hist */
if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(struct sym_hist_entry))
return -1;
sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry));
/* Check for overflow in zalloc argument */
if (sizeof_sym_hist > SIZE_MAX / nr_hists)
return -1;
src->sizeof_sym_hist = sizeof_sym_hist;
src->nr_histograms = nr_hists;
src->histograms = calloc(nr_hists, sizeof_sym_hist);
return src->histograms ? 0 : -1;
}
/* The cycles histogram is lazily allocated. */
static int symbol__alloc_hist_cycles(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
const size_t size = symbol__size(sym);
notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
if (notes->src->cycles_hist == NULL)
return -1;
return 0;
}
void symbol__annotate_zero_histograms(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
annotation__lock(notes);
if (notes->src != NULL) {
memset(notes->src->histograms, 0,
notes->src->nr_histograms * notes->src->sizeof_sym_hist);
if (notes->src->cycles_hist)
memset(notes->src->cycles_hist, 0,
symbol__size(sym) * sizeof(struct cyc_hist));
}
annotation__unlock(notes);
}
static int __symbol__account_cycles(struct cyc_hist *ch,
u64 start,
unsigned offset, unsigned cycles,
unsigned have_start)
{
/*
* For now we can only account one basic block per
* final jump, but multiple blocks could overlap.
* Always account the longest one: when a shorter one
* has already been seen, throw it away.
*
* We separately always account the full cycles.
*/
ch[offset].num_aggr++;
ch[offset].cycles_aggr += cycles;
if (cycles > ch[offset].cycles_max)
ch[offset].cycles_max = cycles;
if (ch[offset].cycles_min) {
if (cycles && cycles < ch[offset].cycles_min)
ch[offset].cycles_min = cycles;
} else
ch[offset].cycles_min = cycles;
if (!have_start && ch[offset].have_start)
return 0;
if (ch[offset].num) {
if (have_start && (!ch[offset].have_start ||
ch[offset].start > start)) {
ch[offset].have_start = 0;
ch[offset].cycles = 0;
ch[offset].num = 0;
if (ch[offset].reset < 0xffff)
ch[offset].reset++;
} else if (have_start &&
ch[offset].start < start)
return 0;
}
if (ch[offset].num < NUM_SPARKS)
ch[offset].cycles_spark[ch[offset].num] = cycles;
ch[offset].have_start = have_start;
ch[offset].start = start;
ch[offset].cycles += cycles;
ch[offset].num++;
return 0;
}
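/*
* Bump both the symbol-wide and per-offset sample and period counters
* in the histogram that belongs to the given event index.
*/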
static int __symbol__inc_addr_samples(struct map_symbol *ms,
struct annotated_source *src, int evidx, u64 addr,
struct perf_sample *sample)
{
struct symbol *sym = ms->sym;
unsigned offset;
struct sym_hist *h;
pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
if ((addr < sym->start || addr >= sym->end) &&
(addr != sym->end || sym->start != sym->end)) {
pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
__func__, __LINE__, sym->name, sym->start, addr, sym->end);
return -ERANGE;
}
offset = addr - sym->start;
h = annotated_source__histogram(src, evidx);
if (h == NULL) {
pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
__func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
return -ENOMEM;
}
h->nr_samples++;
h->addr[offset].nr_samples++;
h->period += sample->period;
h->addr[offset].period += sample->period;
pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
sym->start, sym->name, addr, addr - sym->start, evidx,
h->addr[offset].nr_samples, h->addr[offset].period);
return 0;
}
static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
if (notes->src == NULL) {
notes->src = annotated_source__new();
if (notes->src == NULL)
return NULL;
goto alloc_cycles_hist;
}
if (!notes->src->cycles_hist) {
alloc_cycles_hist:
symbol__alloc_hist_cycles(sym);
}
return notes->src->cycles_hist;
}
struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
{
struct annotation *notes = symbol__annotation(sym);
if (notes->src == NULL) {
notes->src = annotated_source__new();
if (notes->src == NULL)
return NULL;
goto alloc_histograms;
}
if (notes->src->histograms == NULL) {
alloc_histograms:
annotated_source__alloc_histograms(notes->src, symbol__size(sym),
nr_hists);
}
return notes->src;
}
static int symbol__inc_addr_samples(struct map_symbol *ms,
struct evsel *evsel, u64 addr,
struct perf_sample *sample)
{
struct symbol *sym = ms->sym;
struct annotated_source *src;
if (sym == NULL)
return 0;
src = symbol__hists(sym, evsel->evlist->core.nr_entries);
return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
}
static int symbol__account_cycles(u64 addr, u64 start,
struct symbol *sym, unsigned cycles)
{
struct cyc_hist *cycles_hist;
unsigned offset;
if (sym == NULL)
return 0;
cycles_hist = symbol__cycles_hist(sym);
if (cycles_hist == NULL)
return -ENOMEM;
if (addr < sym->start || addr >= sym->end)
return -ERANGE;
if (start) {
if (start < sym->start || start >= sym->end)
return -ERANGE;
if (start >= addr)
start = 0;
}
offset = addr - sym->start;
return __symbol__account_cycles(cycles_hist,
start ? start - sym->start : 0,
offset, cycles,
!!start);
}
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
struct addr_map_symbol *start,
unsigned cycles)
{
u64 saddr = 0;
int err;
if (!cycles)
return 0;
/*
* Only set start when IPC can be computed. We can only
* compute it when the basic block is completely in a single
* function.
* Special case the case when the jump is elsewhere, but
* it starts on the function start.
*/
if (start &&
(start->ms.sym == ams->ms.sym ||
(ams->ms.sym &&
start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
saddr = start->al_addr;
if (saddr == 0)
pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
ams->addr,
start ? start->addr : 0,
ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
saddr);
err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles);
if (err)
pr_debug2("account_cycles failed %d\n", err);
return err;
}
static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
{
unsigned n_insn = 0;
u64 offset;
for (offset = start; offset <= end; offset++) {
if (notes->offsets[offset])
n_insn++;
}
return n_insn;
}
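/*
* Fill every instruction in [start, end] with the block's IPC, i.e.
* the number of instructions in the block divided by its average
* cycle count (ch->cycles / ch->num), skipping lines that already
* have an IPC value.
*/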
static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
{
unsigned n_insn;
unsigned int cover_insn = 0;
u64 offset;
n_insn = annotation__count_insn(notes, start, end);
if (n_insn && ch->num && ch->cycles) {
float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
/* Hide data when there are too many overlaps. */
if (ch->reset >= 0x7fff)
return;
for (offset = start; offset <= end; offset++) {
struct annotation_line *al = notes->offsets[offset];
if (al && al->ipc == 0.0) {
al->ipc = ipc;
cover_insn++;
}
}
if (cover_insn) {
notes->hit_cycles += ch->cycles;
notes->hit_insn += n_insn * ch->num;
notes->cover_insn += cover_insn;
}
}
}
void annotation__compute_ipc(struct annotation *notes, size_t size)
{
s64 offset;
if (!notes->src || !notes->src->cycles_hist)
return;
notes->total_insn = annotation__count_insn(notes, 0, size - 1);
notes->hit_cycles = 0;
notes->hit_insn = 0;
notes->cover_insn = 0;
annotation__lock(notes);
for (offset = size - 1; offset >= 0; --offset) {
struct cyc_hist *ch;
ch = ¬es->src->cycles_hist[offset];
if (ch && ch->cycles) {
struct annotation_line *al;
if (ch->have_start)
annotation__count_and_fill(notes, ch->start, offset, ch);
al = notes->offsets[offset];
if (al && ch->num_aggr) {
al->cycles = ch->cycles_aggr / ch->num_aggr;
al->cycles_max = ch->cycles_max;
al->cycles_min = ch->cycles_min;
}
notes->have_cycles = true;
}
}
annotation__unlock(notes);
}
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
struct evsel *evsel)
{
return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
}
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
struct evsel *evsel, u64 ip)
{
return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
}
static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
{
dl->ins.ops = ins__find(arch, dl->ins.name);
if (!dl->ins.ops)
return;
if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0)
dl->ins.ops = NULL;
}
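/*
* Split an objdump line into instruction name and raw operands, e.g.
* "mov    0x2f5ad5(%rip),%rax" yields name "mov" and raw operands
* "0x2f5ad5(%rip),%rax" (leading/trailing whitespace trimmed).
*/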
static int disasm_line__parse(char *line, const char **namep, char **rawp)
{
char tmp, *name = skip_spaces(line);
if (name[0] == '\0')
return -1;
*rawp = name + 1;
while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
++*rawp;
tmp = (*rawp)[0];
(*rawp)[0] = '\0';
*namep = strdup(name);
if (*namep == NULL)
goto out;
(*rawp)[0] = tmp;
*rawp = strim(*rawp);
return 0;
out:
return -1;
}
struct annotate_args {
struct arch *arch;
struct map_symbol ms;
struct evsel *evsel;
struct annotation_options *options;
s64 offset;
char *line;
int line_nr;
char *fileloc;
};
static void annotation_line__init(struct annotation_line *al,
struct annotate_args *args,
int nr)
{
al->offset = args->offset;
al->line = strdup(args->line);
al->line_nr = args->line_nr;
al->fileloc = args->fileloc;
al->data_nr = nr;
}
static void annotation_line__exit(struct annotation_line *al)
{
zfree_srcline(&al->path);
zfree(&al->line);
}
static size_t disasm_line_size(int nr)
{
struct annotation_line *al;
return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr));
}
/*
* Allocate the disasm annotation line data with the
* following structure:
*
* -------------------------------------------
* struct disasm_line | struct annotation_line
* -------------------------------------------
*
* 'struct annotation_line' is the last member of
* 'struct disasm_line' so that it can be accessed easily.
*/
static struct disasm_line *disasm_line__new(struct annotate_args *args)
{
struct disasm_line *dl = NULL;
int nr = 1;
if (evsel__is_group_event(args->evsel))
nr = args->evsel->core.nr_members;
dl = zalloc(disasm_line_size(nr));
if (!dl)
return NULL;
annotation_line__init(&dl->al, args, nr);
if (dl->al.line == NULL)
goto out_delete;
if (args->offset != -1) {
if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
goto out_free_line;
disasm_line__init_ins(dl, args->arch, &args->ms);
}
return dl;
out_free_line:
zfree(&dl->al.line);
out_delete:
free(dl);
return NULL;
}
void disasm_line__free(struct disasm_line *dl)
{
if (dl->ins.ops && dl->ins.ops->free)
dl->ins.ops->free(&dl->ops);
else
ins__delete(&dl->ops);
zfree(&dl->ins.name);
annotation_line__exit(&dl->al);
free(dl);
}
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
{
if (raw || !dl->ins.ops)
return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw);
return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
}
void annotation__exit(struct annotation *notes)
{
annotated_source__delete(notes->src);
}
static struct sharded_mutex *sharded_mutex;
static void annotation__init_sharded_mutex(void)
{
/* As many mutexes as there are CPUs. */
sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
}
static size_t annotation__hash(const struct annotation *notes)
{
return (size_t)notes;
}
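/*
* Annotations share a pool of mutexes sized by the number of present
* CPUs; the annotation's address is used as the hash to pick one, so
* two different annotations may end up sharing a mutex.
*/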
static struct mutex *annotation__get_mutex(const struct annotation *notes)
{
static pthread_once_t once = PTHREAD_ONCE_INIT;
pthread_once(&once, annotation__init_sharded_mutex);
if (!sharded_mutex)
return NULL;
return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
}
void annotation__lock(struct annotation *notes)
NO_THREAD_SAFETY_ANALYSIS
{
struct mutex *mutex = annotation__get_mutex(notes);
if (mutex)
mutex_lock(mutex);
}
void annotation__unlock(struct annotation *notes)
NO_THREAD_SAFETY_ANALYSIS
{
struct mutex *mutex = annotation__get_mutex(notes);
if (mutex)
mutex_unlock(mutex);
}
bool annotation__trylock(struct annotation *notes)
{
struct mutex *mutex = annotation__get_mutex(notes);
if (!mutex)
return false;
return mutex_trylock(mutex);
}
static void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
list_add_tail(&al->node, head);
}
struct annotation_line *
annotation_line__next(struct annotation_line *pos, struct list_head *head)
{
list_for_each_entry_continue(pos, head, node)
if (pos->offset >= 0)
return pos;
return NULL;
}
static const char *annotate__address_color(struct block_range *br)
{
double cov = block_range__coverage(br);
if (cov >= 0) {
/* mark red for >75% coverage */
if (cov > 0.75)
return PERF_COLOR_RED;
/* mark dull for <1% coverage */
if (cov < 0.01)
return PERF_COLOR_NORMAL;
}
return PERF_COLOR_MAGENTA;
}
static const char *annotate__asm_color(struct block_range *br)
{
double cov = block_range__coverage(br);
if (cov >= 0) {
/* mark dull for <1% coverage */
if (cov < 0.01)
return PERF_COLOR_NORMAL;
}
return PERF_COLOR_BLUE;
}
static void annotate__branch_printf(struct block_range *br, u64 addr)
{
bool emit_comment = true;
if (!br)
return;
#if 1
if (br->is_target && br->start == addr) {
struct block_range *branch = br;
double p;
/*
* Find matching branch to our target.
*/
while (!branch->is_branch)
branch = block_range__next(branch);
p = 100 * (double)br->entry / branch->coverage;
if (p > 0.1) {
if (emit_comment) {
emit_comment = false;
printf("\t#");
}
/*
* The percentage of coverage joined at this target in relation
* to the next branch.
*/
printf(" +%.2f%%", p);
}
}
#endif
if (br->is_branch && br->end == addr) {
double p = 100 * (double)br->taken / br->coverage;
if (p > 0.1) {
if (emit_comment) {
emit_comment = false;
printf("\t#");
}
/*
* The percentage of coverage leaving at this branch, and
* its prediction ratio.
*/
printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
}
}
}
static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
{
s64 offset = dl->al.offset;
const u64 addr = start + offset;
struct block_range *br;
br = block_range__find(addr);
color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr);
color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
annotate__branch_printf(br, addr);
return 0;
}
static int
annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
struct evsel *evsel, u64 len, int min_pcnt, int printed,
int max_lines, struct annotation_line *queue, int addr_fmt_width,
int percent_type)
{
struct disasm_line *dl = container_of(al, struct disasm_line, al);
static const char *prev_line;
if (al->offset != -1) {
double max_percent = 0.0;
int i, nr_percent = 1;
const char *color;
struct annotation *notes = symbol__annotation(sym);
for (i = 0; i < al->data_nr; i++) {
double percent;
percent = annotation_data__percent(&al->data[i],
percent_type);
if (percent > max_percent)
max_percent = percent;
}
if (al->data_nr > nr_percent)
nr_percent = al->data_nr;
if (max_percent < min_pcnt)
return -1;
if (max_lines && printed >= max_lines)
return 1;
if (queue != NULL) {
list_for_each_entry_from(queue, ¬es->src->source, node) {
if (queue == al)
break;
annotation_line__print(queue, sym, start, evsel, len,
0, 0, 1, NULL, addr_fmt_width,
percent_type);
}
}
color = get_percent_color(max_percent);
for (i = 0; i < nr_percent; i++) {
struct annotation_data *data = &al->data[i];
double percent;
percent = annotation_data__percent(data, percent_type);
color = get_percent_color(percent);
if (symbol_conf.show_total_period)
color_fprintf(stdout, color, " %11" PRIu64,
data->he.period);
else if (symbol_conf.show_nr_samples)
color_fprintf(stdout, color, " %7" PRIu64,
data->he.nr_samples);
else
color_fprintf(stdout, color, " %7.2f", percent);
}
printf(" : ");
disasm_line__print(dl, start, addr_fmt_width);
/*
* Also color the filename and line number if needed, with
* the same color as the percentage. Don't print it
* twice for consecutive lines with the same filename:line.
*/
if (al->path) {
if (!prev_line || strcmp(prev_line, al->path)) {
color_fprintf(stdout, color, " // %s", al->path);
prev_line = al->path;
}
}
printf("\n");
} else if (max_lines && printed >= max_lines)
return 1;
else {
int width = symbol_conf.show_total_period ? 12 : 8;
if (queue)
return -1;
if (evsel__is_group_event(evsel))
width *= evsel->core.nr_members;
if (!*al->line)
printf(" %*s:\n", width, " ");
else
printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
}
return 0;
}
/*
* symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw-insn)
* which looks like the following:
*
* 0000000000415500 <_init>:
* 415500: sub $0x8,%rsp
* 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
* 41550b: test %rax,%rax
* 41550e: je 415515 <_init+0x15>
* 415510: callq 416e70 <__gmon_start__@plt>
* 415515: add $0x8,%rsp
* 415519: retq
*
* it will be parsed and saved into struct disasm_line as
* <offset> <name> <ops.raw>
*
* The offset will be relative to the start of the symbol; -1 means
* that it's not a disassembly line, so it should be treated differently.
* The ops.raw part will be parsed further according to the type of the instruction.
*/
static int symbol__parse_objdump_line(struct symbol *sym,
struct annotate_args *args,
char *parsed_line, int *line_nr, char **fileloc)
{
struct map *map = args->ms.map;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *dl;
char *tmp;
s64 line_ip, offset = -1;
regmatch_t match[2];
/* /filename:linenr ? Save line number and ignore. */
if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
*line_nr = atoi(parsed_line + match[1].rm_so);
free(*fileloc);
*fileloc = strdup(parsed_line);
return 0;
}
/* Process hex address followed by ':'. */
line_ip = strtoull(parsed_line, &tmp, 16);
if (parsed_line != tmp && tmp[0] == ':' && tmp[1] != '\0') {
u64 start = map__rip_2objdump(map, sym->start),
end = map__rip_2objdump(map, sym->end);
offset = line_ip - start;
if ((u64)line_ip < start || (u64)line_ip >= end)
offset = -1;
else
parsed_line = tmp + 1;
}
args->offset = offset;
args->line = parsed_line;
args->line_nr = *line_nr;
args->fileloc = *fileloc;
args->ms.sym = sym;
dl = disasm_line__new(args);
(*line_nr)++;
if (dl == NULL)
return -1;
if (!disasm_line__has_local_offset(dl)) {
dl->ops.target.offset = dl->ops.target.addr -
map__rip_2objdump(map, sym->start);
dl->ops.target.offset_avail = true;
}
/* kcore has no symbols, so add the call target symbol */
if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) {
struct addr_map_symbol target = {
.addr = dl->ops.target.addr,
.ms = { .map = map, },
};
if (!maps__find_ams(args->ms.maps, &target) &&
target.ms.sym->start == target.al_addr)
dl->ops.target.sym = target.ms.sym;
}
annotation_line__add(&dl->al, ¬es->src->source);
return 0;
}
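/*
* file_lineno matches the source location lines that objdump -l emits,
* e.g. "/home/user/foo.c:123", capturing the line number in match[1].
*/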
static __attribute__((constructor)) void symbol__init_regexpr(void)
{
regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
}
static void delete_last_nop(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
struct list_head *list = ¬es->src->source;
struct disasm_line *dl;
while (!list_empty(list)) {
dl = list_entry(list->prev, struct disasm_line, al.node);
if (dl->ins.ops) {
if (dl->ins.ops != &nop_ops)
return;
} else {
if (!strstr(dl->al.line, " nop ") &&
!strstr(dl->al.line, " nopl ") &&
!strstr(dl->al.line, " nopw "))
return;
}
list_del_init(&dl->al.node);
disasm_line__free(dl);
}
}
int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen)
{
struct dso *dso = map__dso(ms->map);
BUG_ON(buflen == 0);
if (errnum >= 0) {
str_error_r(errnum, buf, buflen);
return 0;
}
switch (errnum) {
case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
char bf[SBUILD_ID_SIZE + 15] = " with build id ";
char *build_id_msg = NULL;
if (dso->has_build_id) {
build_id__sprintf(&dso->bid, bf + 15);
build_id_msg = bf;
}
scnprintf(buf, buflen,
"No vmlinux file%s\nwas found in the path.\n\n"
"Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
"Please use:\n\n"
" perf buildid-cache -vu vmlinux\n\n"
"or:\n\n"
" --vmlinux vmlinux\n", build_id_msg ?: "");
}
break;
case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
break;
case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP:
scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions.");
break;
case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING:
scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization.");
break;
case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE:
scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name);
break;
case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF:
scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.",
dso->long_name);
break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
}
return 0;
}
static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
{
char linkname[PATH_MAX];
char *build_id_filename;
char *build_id_path = NULL;
char *pos;
int len;
if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
!dso__is_kcore(dso))
return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
build_id_filename = dso__build_id_filename(dso, NULL, 0, false);
if (build_id_filename) {
__symbol__join_symfs(filename, filename_size, build_id_filename);
free(build_id_filename);
} else {
if (dso->has_build_id)
return ENOMEM;
goto fallback;
}
build_id_path = strdup(filename);
if (!build_id_path)
return ENOMEM;
/*
* The old style build-id cache has a name of XX/XXXXXXX.., while the
* new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
* Extract the build-id part of the dirname in the new style only.
*/
pos = strrchr(build_id_path, '/');
if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
dirname(build_id_path);
if (dso__is_kcore(dso))
goto fallback;
len = readlink(build_id_path, linkname, sizeof(linkname) - 1);
if (len < 0)
goto fallback;
linkname[len] = '\0';
if (strstr(linkname, DSO__NAME_KALLSYMS) ||
access(filename, R_OK)) {
fallback:
/*
* If we don't have build-ids or the build-id file isn't in the
* cache, or is just a kallsyms file, well, let's hope that this
* DSO is the same as when 'perf record' ran.
*/
if (dso->kernel && dso->long_name[0] == '/')
snprintf(filename, filename_size, "%s", dso->long_name);
else
__symbol__join_symfs(filename, filename_size, dso->long_name);
mutex_lock(&dso->lock);
if (access(filename, R_OK) && errno == ENOENT && dso->nsinfo) {
char *new_name = dso__filename_with_chroot(dso, filename);
if (new_name) {
strlcpy(filename, new_name, filename_size);
free(new_name);
}
}
mutex_unlock(&dso->lock);
}
free(build_id_path);
return 0;
}
#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
#define PACKAGE "perf"
#include <bfd.h>
#include <dis-asm.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <tools/dis-asm-compat.h>
static int symbol__disassemble_bpf(struct symbol *sym,
struct annotate_args *args)
{
struct annotation *notes = symbol__annotation(sym);
struct annotation_options *opts = args->options;
struct bpf_prog_linfo *prog_linfo = NULL;
struct bpf_prog_info_node *info_node;
int len = sym->end - sym->start;
disassembler_ftype disassemble;
struct map *map = args->ms.map;
struct perf_bpil *info_linear;
struct disassemble_info info;
struct dso *dso = map__dso(map);
int pc = 0, count, sub_id;
struct btf *btf = NULL;
char tpath[PATH_MAX];
size_t buf_size;
int nr_skip = 0;
char *buf;
bfd *bfdf;
int ret;
FILE *s;
if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
sym->name, sym->start, sym->end - sym->start);
memset(tpath, 0, sizeof(tpath));
perf_exe(tpath, sizeof(tpath));
bfdf = bfd_openr(tpath, NULL);
if (bfdf == NULL)
abort();
if (!bfd_check_format(bfdf, bfd_object))
abort();
s = open_memstream(&buf, &buf_size);
if (!s) {
ret = errno;
goto out;
}
init_disassemble_info_compat(&info, s,
(fprintf_ftype) fprintf,
fprintf_styled);
info.arch = bfd_get_arch(bfdf);
info.mach = bfd_get_mach(bfdf);
info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
dso->bpf_prog.id);
if (!info_node) {
ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
goto out;
}
info_linear = info_node->info_linear;
sub_id = dso->bpf_prog.sub_id;
info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
info.buffer_length = info_linear->info.jited_prog_len;
if (info_linear->info.nr_line_info)
prog_linfo = bpf_prog_linfo__new(&info_linear->info);
if (info_linear->info.btf_id) {
struct btf_node *node;
node = perf_env__find_btf(dso->bpf_prog.env,
info_linear->info.btf_id);
if (node)
btf = btf__new((__u8 *)(node->data),
node->data_size);
}
disassemble_init_for_target(&info);
#ifdef DISASM_FOUR_ARGS_SIGNATURE
disassemble = disassembler(info.arch,
bfd_big_endian(bfdf),
info.mach,
bfdf);
#else
disassemble = disassembler(bfdf);
#endif
if (disassemble == NULL)
abort();
fflush(s);
do {
const struct bpf_line_info *linfo = NULL;
struct disasm_line *dl;
size_t prev_buf_size;
const char *srcline;
u64 addr;
addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
count = disassemble(pc, &info);
if (prog_linfo)
linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
addr, sub_id,
nr_skip);
if (linfo && btf) {
srcline = btf__name_by_offset(btf, linfo->line_off);
nr_skip++;
} else
srcline = NULL;
fprintf(s, "\n");
prev_buf_size = buf_size;
fflush(s);
if (!opts->hide_src_code && srcline) {
args->offset = -1;
args->line = strdup(srcline);
args->line_nr = 0;
args->fileloc = NULL;
args->ms.sym = sym;
dl = disasm_line__new(args);
if (dl) {
annotation_line__add(&dl->al,
¬es->src->source);
}
}
args->offset = pc;
args->line = buf + prev_buf_size;
args->line_nr = 0;
args->fileloc = NULL;
args->ms.sym = sym;
dl = disasm_line__new(args);
if (dl)
annotation_line__add(&dl->al, ¬es->src->source);
pc += count;
} while (count > 0 && pc < len);
ret = 0;
out:
free(prog_linfo);
btf__free(btf);
fclose(s);
bfd_close(bfdf);
return ret;
}
#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
struct annotate_args *args __maybe_unused)
{
return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
}
#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
static int
symbol__disassemble_bpf_image(struct symbol *sym,
struct annotate_args *args)
{
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *dl;
args->offset = -1;
args->line = strdup("to be implemented");
args->line_nr = 0;
args->fileloc = NULL;
dl = disasm_line__new(args);
if (dl)
annotation_line__add(&dl->al, ¬es->src->source);
zfree(&args->line);
return 0;
}
/*
* Possibly create a new version of line with tabs expanded. Returns the
* existing or new line, storage is updated if a new line is allocated. If
* allocation fails then NULL is returned.
*/
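/*
* Tabs are expanded to the next multiple-of-8 column, e.g. "a\tb"
* becomes "a" followed by seven spaces and then "b".
*/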
static char *expand_tabs(char *line, char **storage, size_t *storage_len)
{
size_t i, src, dst, len, new_storage_len, num_tabs;
char *new_line;
size_t line_len = strlen(line);
for (num_tabs = 0, i = 0; i < line_len; i++)
if (line[i] == '\t')
num_tabs++;
if (num_tabs == 0)
return line;
/*
* Space for the line and '\0'. Each tab expands to at most 8
* spaces, so it may introduce up to 7 additional characters.
*/
new_storage_len = line_len + 1 + (num_tabs * 7);
new_line = malloc(new_storage_len);
if (new_line == NULL) {
pr_err("Failure allocating memory for tab expansion\n");
return NULL;
}
/*
* Copy regions starting at src and expand tabs. If there are two
* adjacent tabs then 'src == i', the memcpy is of size 0 and the spaces
* are inserted.
*/
for (i = 0, src = 0, dst = 0; i < line_len && num_tabs; i++) {
if (line[i] == '\t') {
len = i - src;
memcpy(&new_line[dst], &line[src], len);
dst += len;
new_line[dst++] = ' ';
while (dst % 8 != 0)
new_line[dst++] = ' ';
src = i + 1;
num_tabs--;
}
}
/* Expand the last region. */
len = line_len - src;
memcpy(&new_line[dst], &line[src], len);
dst += len;
new_line[dst] = '\0';
free(*storage);
*storage = new_line;
*storage_len = new_storage_len;
return new_line;
}
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
struct annotation_options *opts = args->options;
struct map *map = args->ms.map;
struct dso *dso = map__dso(map);
char *command;
FILE *file;
char symfs_filename[PATH_MAX];
struct kcore_extract kce;
bool delete_extract = false;
bool decomp = false;
int lineno = 0;
char *fileloc = NULL;
int nline;
char *line;
size_t line_len;
const char *objdump_argv[] = {
"/bin/sh",
"-c",
NULL, /* Will be the objdump command to run. */
"--",
NULL, /* Will be the symfs path. */
NULL,
};
struct child_process objdump_process;
int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
if (err)
return err;
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
symfs_filename, sym->name, map__unmap_ip(map, sym->start),
map__unmap_ip(map, sym->end));
pr_debug("annotating [%p] %30s : [%p] %30s\n",
dso, dso->long_name, sym, sym->name);
if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
return symbol__disassemble_bpf(sym, args);
} else if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE) {
return symbol__disassemble_bpf_image(sym, args);
} else if (dso__is_kcore(dso)) {
kce.kcore_filename = symfs_filename;
kce.addr = map__rip_2objdump(map, sym->start);
kce.offs = sym->start;
kce.len = sym->end - sym->start;
if (!kcore_extract__create(&kce)) {
delete_extract = true;
strlcpy(symfs_filename, kce.extract_filename,
sizeof(symfs_filename));
}
} else if (dso__needs_decompress(dso)) {
char tmp[KMOD_DECOMP_LEN];
if (dso__decompress_kmodule_path(dso, symfs_filename,
tmp, sizeof(tmp)) < 0)
return -1;
decomp = true;
strcpy(symfs_filename, tmp);
}
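/*
* Build the objdump command line; it is run via "/bin/sh -c" with the
* symfs path passed separately as "$1" (after the "--" that becomes
* $0), so that file names containing shell metacharacters are safe.
*/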
err = asprintf(&command,
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
" -l -d %s %s %s %c%s%c %s%s -C \"$1\"",
opts->objdump_path ?: "objdump",
opts->disassembler_style ? "-M " : "",
opts->disassembler_style ?: "",
map__rip_2objdump(map, sym->start),
map__rip_2objdump(map, sym->end),
opts->show_asm_raw ? "" : "--no-show-raw-insn",
opts->annotate_src ? "-S" : "",
opts->prefix ? "--prefix " : "",
opts->prefix ? '"' : ' ',
opts->prefix ?: "",
opts->prefix ? '"' : ' ',
opts->prefix_strip ? "--prefix-strip=" : "",
opts->prefix_strip ?: "");
if (err < 0) {
pr_err("Failure allocating memory for the command to run\n");
goto out_remove_tmp;
}
pr_debug("Executing: %s\n", command);
objdump_argv[2] = command;
objdump_argv[4] = symfs_filename;
/* Create a pipe to read from for stdout */
memset(&objdump_process, 0, sizeof(objdump_process));
objdump_process.argv = objdump_argv;
objdump_process.out = -1;
objdump_process.err = -1;
objdump_process.no_stderr = 1;
if (start_command(&objdump_process)) {
pr_err("Failure starting to run %s\n", command);
err = -1;
goto out_free_command;
}
file = fdopen(objdump_process.out, "r");
if (!file) {
pr_err("Failure creating FILE stream for %s\n", command);
/*
* If we were using debug info we should retry with the
* original binary.
*/
err = -1;
goto out_close_stdout;
}
/* Storage for getline. */
line = NULL;
line_len = 0;
nline = 0;
while (!feof(file)) {
const char *match;
char *expanded_line;
if (getline(&line, &line_len, file) < 0 || !line)
break;
/* Skip lines containing "filename:" */
match = strstr(line, symfs_filename);
if (match && match[strlen(symfs_filename)] == ':')
continue;
expanded_line = strim(line);
expanded_line = expand_tabs(expanded_line, &line, &line_len);
if (!expanded_line)
break;
/*
* The source code line number (lineno) needs to be preserved
* across calls to symbol__parse_objdump_line(), so that it
* can be associated with the instructions until the next one.
* See disasm_line__new() and struct disasm_line::line_nr.
*/
if (symbol__parse_objdump_line(sym, args, expanded_line,
&lineno, &fileloc) < 0)
break;
nline++;
}
free(line);
free(fileloc);
err = finish_command(&objdump_process);
if (err)
pr_err("Error running %s\n", command);
if (nline == 0) {
err = -1;
pr_err("No output from %s\n", command);
}
/*
* kallsyms does not have symbol sizes so there may be a nop at the end.
* Remove it.
*/
if (dso__is_kcore(dso))
delete_last_nop(sym);
fclose(file);
out_close_stdout:
close(objdump_process.out);
out_free_command:
free(command);
out_remove_tmp:
if (decomp)
unlink(symfs_filename);
if (delete_extract)
kcore_extract__delete(&kce);
return err;
}
static void calc_percent(struct sym_hist *sym_hist,
struct hists *hists,
struct annotation_data *data,
s64 offset, s64 end)
{
unsigned int hits = 0;
u64 period = 0;
while (offset < end) {
hits += sym_hist->addr[offset].nr_samples;
period += sym_hist->addr[offset].period;
++offset;
}
if (sym_hist->nr_samples) {
data->he.period = period;
data->he.nr_samples = hits;
data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
}
if (hists->stats.nr_non_filtered_samples)
data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
if (sym_hist->period)
data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
if (hists->stats.total_period)
data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
}
static void annotation__calc_percent(struct annotation *notes,
struct evsel *leader, s64 len)
{
struct annotation_line *al, *next;
struct evsel *evsel;
list_for_each_entry(al, ¬es->src->source, node) {
s64 end;
int i = 0;
if (al->offset == -1)
continue;
next = annotation_line__next(al, ¬es->src->source);
end = next ? next->offset : len;
for_each_group_evsel(evsel, leader) {
struct hists *hists = evsel__hists(evsel);
struct annotation_data *data;
struct sym_hist *sym_hist;
BUG_ON(i >= al->data_nr);
sym_hist = annotation__histogram(notes, evsel->core.idx);
data = &al->data[i++];
calc_percent(sym_hist, hists, data, al->offset, end);
}
}
}
void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
{
struct annotation *notes = symbol__annotation(sym);
annotation__calc_percent(notes, evsel, symbol__size(sym));
}
int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *options, struct arch **parch)
{
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct annotate_args args = {
.evsel = evsel,
.options = options,
};
struct perf_env *env = evsel__env(evsel);
const char *arch_name = perf_env__arch(env);
struct arch *arch;
int err;
if (!arch_name)
return errno;
args.arch = arch = arch__find(arch_name);
if (arch == NULL) {
pr_err("%s: unsupported arch %s\n", __func__, arch_name);
return ENOTSUP;
}
if (parch)
*parch = arch;
if (arch->init) {
err = arch->init(arch, env ? env->cpuid : NULL);
if (err) {
pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
return err;
}
}
args.ms = *ms;
if (notes->options && notes->options->full_addr)
notes->start = map__objdump_2mem(ms->map, ms->sym->start);
else
notes->start = map__rip_2objdump(ms->map, ms->sym->start);
return symbol__disassemble(sym, &args);
}
static void insert_source_line(struct rb_root *root, struct annotation_line *al,
struct annotation_options *opts)
{
struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
int i, ret;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct annotation_line, rb_node);
ret = strcmp(iter->path, al->path);
if (ret == 0) {
for (i = 0; i < al->data_nr; i++) {
iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
opts->percent_type);
}
return;
}
if (ret < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
for (i = 0; i < al->data_nr; i++) {
al->data[i].percent_sum = annotation_data__percent(&al->data[i],
opts->percent_type);
}
rb_link_node(&al->rb_node, parent, p);
rb_insert_color(&al->rb_node, root);
}
static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
{
int i;
for (i = 0; i < a->data_nr; i++) {
if (a->data[i].percent_sum == b->data[i].percent_sum)
continue;
return a->data[i].percent_sum > b->data[i].percent_sum;
}
return 0;
}
static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
{
struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct annotation_line, rb_node);
if (cmp_source_line(al, iter))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&al->rb_node, parent, p);
rb_insert_color(&al->rb_node, root);
}
static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
struct annotation_line *al;
struct rb_node *node;
node = rb_first(src_root);
while (node) {
struct rb_node *next;
al = rb_entry(node, struct annotation_line, rb_node);
next = rb_next(node);
rb_erase(node, src_root);
__resort_source_line(dest_root, al);
node = next;
}
}
static void print_summary(struct rb_root *root, const char *filename)
{
struct annotation_line *al;
struct rb_node *node;
printf("\nSorted summary for file %s\n", filename);
printf("----------------------------------------------\n\n");
if (RB_EMPTY_ROOT(root)) {
printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
return;
}
node = rb_first(root);
while (node) {
double percent, percent_max = 0.0;
const char *color;
char *path;
int i;
al = rb_entry(node, struct annotation_line, rb_node);
for (i = 0; i < al->data_nr; i++) {
percent = al->data[i].percent_sum;
color = get_percent_color(percent);
color_fprintf(stdout, color, " %7.2f", percent);
if (percent > percent_max)
percent_max = percent;
}
path = al->path;
color = get_percent_color(percent_max);
color_fprintf(stdout, color, " %s\n", path);
node = rb_next(node);
}
}
static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
u64 len = symbol__size(sym), offset;
for (offset = 0; offset < len; ++offset)
if (h->addr[offset].nr_samples != 0)
printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
sym->start + offset, h->addr[offset].nr_samples);
printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
}
static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
{
char bf[32];
struct annotation_line *line;
list_for_each_entry_reverse(line, lines, node) {
if (line->offset != -1)
return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
}
return 0;
}
int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *opts)
{
struct map *map = ms->map;
struct symbol *sym = ms->sym;
struct dso *dso = map__dso(map);
char *filename;
const char *d_filename;
const char *evsel_name = evsel__name(evsel);
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
struct annotation_line *pos, *queue = NULL;
u64 start = map__rip_2objdump(map, sym->start);
int printed = 2, queue_len = 0, addr_fmt_width;
int more = 0;
bool context = opts->context;
u64 len;
int width = symbol_conf.show_total_period ? 12 : 8;
int graph_dotted_len;
char buf[512];
filename = strdup(dso->long_name);
if (!filename)
return -ENOMEM;
if (opts->full_path)
d_filename = filename;
else
d_filename = basename(filename);
len = symbol__size(sym);
if (evsel__is_group_event(evsel)) {
width *= evsel->core.nr_members;
evsel__group_desc(evsel, buf, sizeof(buf));
evsel_name = buf;
}
graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
"percent: %s)\n",
width, width, symbol_conf.show_total_period ? "Period" :
symbol_conf.show_nr_samples ? "Samples" : "Percent",
d_filename, evsel_name, h->nr_samples,
percent_type_str(opts->percent_type));
printf("%-*.*s----\n",
graph_dotted_len, graph_dotted_len, graph_dotted_line);
if (verbose > 0)
symbol__annotate_hits(sym, evsel);
addr_fmt_width = annotated_source__addr_fmt_width(¬es->src->source, start);
list_for_each_entry(pos, ¬es->src->source, node) {
int err;
if (context && queue == NULL) {
queue = pos;
queue_len = 0;
}
err = annotation_line__print(pos, sym, start, evsel, len,
opts->min_pcnt, printed, opts->max_lines,
queue, addr_fmt_width, opts->percent_type);
switch (err) {
case 0:
++printed;
if (context) {
printed += queue_len;
queue = NULL;
queue_len = 0;
}
break;
case 1:
/* filtered by max_lines */
++more;
break;
case -1:
default:
/*
* Filtered by min_pcnt or non-IP lines when
* context != 0.
*/
if (!context)
break;
if (queue_len == context)
queue = list_entry(queue->node.next, typeof(*queue), node);
else
++queue_len;
break;
}
}
free(filename);
return more;
}
static void FILE__set_percent_color(void *fp __maybe_unused,
double percent __maybe_unused,
bool current __maybe_unused)
{
}
static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
int nr __maybe_unused, bool current __maybe_unused)
{
return 0;
}
static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
{
return 0;
}
static void FILE__printf(void *fp, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vfprintf(fp, fmt, args);
va_end(args);
}
static void FILE__write_graph(void *fp, int graph)
{
const char *s;
switch (graph) {
case DARROW_CHAR: s = "↓"; break;
case UARROW_CHAR: s = "↑"; break;
case LARROW_CHAR: s = "←"; break;
case RARROW_CHAR: s = "→"; break;
default: s = "?"; break;
}
fputs(s, fp);
}
static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
struct annotation_options *opts)
{
struct annotation *notes = symbol__annotation(sym);
struct annotation_write_ops wops = {
.first_line = true,
.obj = fp,
.set_color = FILE__set_color,
.set_percent_color = FILE__set_percent_color,
.set_jumps_percent_color = FILE__set_jumps_percent_color,
.printf = FILE__printf,
.write_graph = FILE__write_graph,
};
struct annotation_line *al;
list_for_each_entry(al, ¬es->src->source, node) {
if (annotation_line__filter(al, notes))
continue;
annotation_line__write(al, notes, &wops, opts);
fputc('\n', fp);
wops.first_line = false;
}
return 0;
}
int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *opts)
{
const char *ev_name = evsel__name(evsel);
char buf[1024];
char *filename;
int err = -1;
FILE *fp;
if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
return -1;
fp = fopen(filename, "w");
if (fp == NULL)
goto out_free_filename;
if (evsel__is_group_event(evsel)) {
evsel__group_desc(evsel, buf, sizeof(buf));
ev_name = buf;
}
fprintf(fp, "%s() %s\nEvent: %s\n\n",
ms->sym->name, map__dso(ms->map)->long_name, ev_name);
symbol__annotate_fprintf2(ms->sym, fp, opts);
fclose(fp);
err = 0;
out_free_filename:
free(filename);
return err;
}
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
memset(h, 0, notes->src->sizeof_sym_hist);
}
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
int len = symbol__size(sym), offset;
h->nr_samples = 0;
for (offset = 0; offset < len; ++offset) {
h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8;
h->nr_samples += h->addr[offset].nr_samples;
}
}
void annotated_source__purge(struct annotated_source *as)
{
struct annotation_line *al, *n;
list_for_each_entry_safe(al, n, &as->source, node) {
list_del_init(&al->node);
disasm_line__free(disasm_line(al));
}
}
static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
size_t printed;
if (dl->al.offset == -1)
return fprintf(fp, "%s\n", dl->al.line);
printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
if (dl->ops.raw[0] != '\0') {
printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
dl->ops.raw);
}
return printed + fprintf(fp, "\n");
}
size_t disasm__fprintf(struct list_head *head, FILE *fp)
{
struct disasm_line *pos;
size_t printed = 0;
list_for_each_entry(pos, head, al.node)
printed += disasm_line__fprintf(pos, fp);
return printed;
}
bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
{
if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
!disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
dl->ops.target.offset >= (s64)symbol__size(sym))
return false;
return true;
}
void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
{
u64 offset, size = symbol__size(sym);
/* PLT symbols contain external offsets */
if (strstr(sym->name, "@plt"))
return;
for (offset = 0; offset < size; ++offset) {
struct annotation_line *al = notes->offsets[offset];
struct disasm_line *dl;
dl = disasm_line(al);
if (!disasm_line__is_valid_local_jump(dl, sym))
continue;
al = notes->offsets[dl->ops.target.offset];
/*
* FIXME: Oops, no jump target? Buggy disassembler? Or do we
* have to adjust to the previous offset?
*/
if (al == NULL)
continue;
if (++al->jump_sources > notes->max_jump_sources)
notes->max_jump_sources = al->jump_sources;
}
}
void annotation__set_offsets(struct annotation *notes, s64 size)
{
struct annotation_line *al;
notes->max_line_len = 0;
notes->nr_entries = 0;
notes->nr_asm_entries = 0;
list_for_each_entry(al, ¬es->src->source, node) {
size_t line_len = strlen(al->line);
if (notes->max_line_len < line_len)
notes->max_line_len = line_len;
al->idx = notes->nr_entries++;
if (al->offset != -1) {
al->idx_asm = notes->nr_asm_entries++;
/*
* FIXME: short term bandaid to cope with assembly
* routines that comes with labels in the same column
* as the address in objdump, sigh.
*
* E.g. copy_user_generic_unrolled
*/
if (al->offset < size)
notes->offsets[al->offset] = al;
} else
al->idx_asm = -1;
}
}
static inline int width_jumps(int n)
{
if (n >= 100)
return 5;
if (n / 10)
return 2;
return 1;
}
static int annotation__max_ins_name(struct annotation *notes)
{
int max_name = 0, len;
struct annotation_line *al;
list_for_each_entry(al, ¬es->src->source, node) {
if (al->offset == -1)
continue;
len = strlen(disasm_line(al)->ins.name);
if (max_name < len)
max_name = len;
}
return max_name;
}
void annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
notes->widths.addr = notes->widths.target =
notes->widths.min_addr = hex_width(symbol__size(sym));
notes->widths.max_addr = hex_width(sym->end);
notes->widths.jumps = width_jumps(notes->max_jump_sources);
notes->widths.max_ins_name = annotation__max_ins_name(notes);
}
void annotation__update_column_widths(struct annotation *notes)
{
if (notes->options->use_offset)
notes->widths.target = notes->widths.min_addr;
else if (notes->options->full_addr)
notes->widths.target = BITS_PER_LONG / 4;
else
notes->widths.target = notes->widths.max_addr;
notes->widths.addr = notes->widths.target;
if (notes->options->show_nr_jumps)
notes->widths.addr += notes->widths.jumps + 1;
}
void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
{
notes->options->full_addr = !notes->options->full_addr;
if (notes->options->full_addr)
notes->start = map__objdump_2mem(ms->map, ms->sym->start);
else
notes->start = map__rip_2objdump(ms->map, ms->sym->start);
annotation__update_column_widths(notes);
}
static void annotation__calc_lines(struct annotation *notes, struct map *map,
struct rb_root *root,
struct annotation_options *opts)
{
struct annotation_line *al;
struct rb_root tmp_root = RB_ROOT;
list_for_each_entry(al, ¬es->src->source, node) {
double percent_max = 0.0;
int i;
for (i = 0; i < al->data_nr; i++) {
double percent;
percent = annotation_data__percent(&al->data[i],
opts->percent_type);
if (percent > percent_max)
percent_max = percent;
}
if (percent_max <= 0.5)
continue;
al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL,
false, true, notes->start + al->offset);
insert_source_line(&tmp_root, al, opts);
}
resort_source_line(root, &tmp_root);
}
static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root,
struct annotation_options *opts)
{
struct annotation *notes = symbol__annotation(ms->sym);
annotation__calc_lines(notes, ms->map, root, opts);
}
int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *opts)
{
struct dso *dso = map__dso(ms->map);
struct symbol *sym = ms->sym;
struct rb_root source_line = RB_ROOT;
struct hists *hists = evsel__hists(evsel);
char buf[1024];
int err;
err = symbol__annotate2(ms, evsel, opts, NULL);
if (err) {
char msg[BUFSIZ];
dso->annotate_warned = true;
symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
return -1;
}
if (opts->print_lines) {
srcline_full_filename = opts->full_path;
symbol__calc_lines(ms, &source_line, opts);
print_summary(&source_line, dso->long_name);
}
hists__scnprintf_title(hists, buf, sizeof(buf));
fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
buf, percent_type_str(opts->percent_type), sym->name, dso->long_name);
symbol__annotate_fprintf2(sym, stdout, opts);
annotated_source__purge(symbol__annotation(sym)->src);
return 0;
}
int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *opts)
{
struct dso *dso = map__dso(ms->map);
struct symbol *sym = ms->sym;
struct rb_root source_line = RB_ROOT;
int err;
err = symbol__annotate(ms, evsel, opts, NULL);
if (err) {
char msg[BUFSIZ];
dso->annotate_warned = true;
symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
return -1;
}
symbol__calc_percent(sym, evsel);
if (opts->print_lines) {
srcline_full_filename = opts->full_path;
symbol__calc_lines(ms, &source_line, opts);
print_summary(&source_line, dso->long_name);
}
symbol__annotate_printf(ms, evsel, opts);
annotated_source__purge(symbol__annotation(sym)->src);
return 0;
}
bool ui__has_annotation(void)
{
return use_browser == 1 && perf_hpp_list.sym;
}
static double annotation_line__max_percent(struct annotation_line *al,
struct annotation *notes,
unsigned int percent_type)
{
double percent_max = 0.0;
int i;
for (i = 0; i < notes->nr_events; i++) {
double percent;
percent = annotation_data__percent(&al->data[i],
percent_type);
if (percent > percent_max)
percent_max = percent;
}
return percent_max;
}
static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
void *obj, char *bf, size_t size,
void (*obj__printf)(void *obj, const char *fmt, ...),
void (*obj__write_graph)(void *obj, int graph))
{
if (dl->ins.ops && dl->ins.ops->scnprintf) {
if (ins__is_jump(&dl->ins)) {
bool fwd;
if (dl->ops.target.outside)
goto call_like;
fwd = dl->ops.target.offset > dl->al.offset;
obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
obj__printf(obj, " ");
} else if (ins__is_call(&dl->ins)) {
call_like:
obj__write_graph(obj, RARROW_CHAR);
obj__printf(obj, " ");
} else if (ins__is_ret(&dl->ins)) {
obj__write_graph(obj, LARROW_CHAR);
obj__printf(obj, " ");
} else {
obj__printf(obj, " ");
}
} else {
obj__printf(obj, " ");
}
disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset, notes->widths.max_ins_name);
}
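/*
 * Format the average IPC and the percentage of instructions covered by cycle
 * information, e.g. "(Average IPC: 1.52, IPC Coverage: 74.1%)".
 */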
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
{
double ipc = 0.0, coverage = 0.0;
if (notes->hit_cycles)
ipc = notes->hit_insn / ((double)notes->hit_cycles);
if (notes->total_insn) {
coverage = notes->cover_insn * 100.0 /
((double)notes->total_insn);
}
scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
ipc, coverage);
}
static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
bool first_line, bool current_entry, bool change_color, int width,
void *obj, unsigned int percent_type,
int (*obj__set_color)(void *obj, int color),
void (*obj__set_percent_color)(void *obj, double percent, bool current),
int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
void (*obj__printf)(void *obj, const char *fmt, ...),
void (*obj__write_graph)(void *obj, int graph))
{
double percent_max = annotation_line__max_percent(al, notes, percent_type);
int pcnt_width = annotation__pcnt_width(notes),
cycles_width = annotation__cycles_width(notes);
bool show_title = false;
char bf[256];
int printed;
if (first_line && (al->offset == -1 || percent_max == 0.0)) {
if (notes->have_cycles) {
if (al->ipc == 0.0 && al->cycles == 0)
show_title = true;
} else
show_title = true;
}
if (al->offset != -1 && percent_max != 0.0) {
int i;
for (i = 0; i < notes->nr_events; i++) {
double percent;
percent = annotation_data__percent(&al->data[i], percent_type);
obj__set_percent_color(obj, percent, current_entry);
if (symbol_conf.show_total_period) {
obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
} else if (symbol_conf.show_nr_samples) {
obj__printf(obj, "%6" PRIu64 " ",
al->data[i].he.nr_samples);
} else {
obj__printf(obj, "%6.2f ", percent);
}
}
} else {
obj__set_percent_color(obj, 0, current_entry);
if (!show_title)
obj__printf(obj, "%-*s", pcnt_width, " ");
else {
obj__printf(obj, "%-*s", pcnt_width,
symbol_conf.show_total_period ? "Period" :
symbol_conf.show_nr_samples ? "Samples" : "Percent");
}
}
if (notes->have_cycles) {
if (al->ipc)
obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->ipc);
else if (!show_title)
obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
else
obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
if (!notes->options->show_minmax_cycle) {
if (al->cycles)
obj__printf(obj, "%*" PRIu64 " ",
ANNOTATION__CYCLES_WIDTH - 1, al->cycles);
else if (!show_title)
obj__printf(obj, "%*s",
ANNOTATION__CYCLES_WIDTH, " ");
else
obj__printf(obj, "%*s ",
ANNOTATION__CYCLES_WIDTH - 1,
"Cycle");
} else {
if (al->cycles) {
char str[32];
scnprintf(str, sizeof(str),
"%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
al->cycles, al->cycles_min,
al->cycles_max);
obj__printf(obj, "%*s ",
ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
str);
} else if (!show_title)
obj__printf(obj, "%*s",
ANNOTATION__MINMAX_CYCLES_WIDTH,
" ");
else
obj__printf(obj, "%*s ",
ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
"Cycle(min/max)");
}
if (show_title && !*al->line) {
ipc_coverage_string(bf, sizeof(bf), notes);
obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
}
}
obj__printf(obj, " ");
if (!*al->line)
obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
else if (al->offset == -1) {
if (al->line_nr && notes->options->show_linenr)
printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr);
else
printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " ");
obj__printf(obj, bf);
obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
} else {
u64 addr = al->offset;
int color = -1;
if (!notes->options->use_offset)
addr += notes->start;
if (!notes->options->use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
} else {
if (al->jump_sources &&
notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
if (notes->options->show_nr_jumps) {
int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ",
notes->widths.jumps,
al->jump_sources);
prev = obj__set_jumps_percent_color(obj, al->jump_sources,
current_entry);
obj__printf(obj, bf);
obj__set_color(obj, prev);
}
print_addr:
printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
notes->widths.target, addr);
} else if (ins__is_call(&disasm_line(al)->ins) &&
notes->options->offset_level >= ANNOTATION__OFFSET_CALL) {
goto print_addr;
} else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
goto print_addr;
} else {
printed = scnprintf(bf, sizeof(bf), "%-*s ",
notes->widths.addr, " ");
}
}
if (change_color)
color = obj__set_color(obj, HE_COLORSET_ADDR);
obj__printf(obj, bf);
if (change_color)
obj__set_color(obj, color);
disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);
obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
}
}
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
struct annotation_write_ops *wops,
struct annotation_options *opts)
{
__annotation_line__write(al, notes, wops->first_line, wops->current_entry,
wops->change_color, wops->width, wops->obj,
opts->percent_type,
wops->set_color, wops->set_percent_color,
wops->set_jumps_percent_color, wops->printf,
wops->write_graph);
}
int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *options, struct arch **parch)
{
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
size_t size = symbol__size(sym);
int nr_pcnt = 1, err;
notes->offsets = zalloc(size * sizeof(struct annotation_line *));
if (notes->offsets == NULL)
return ENOMEM;
if (evsel__is_group_event(evsel))
nr_pcnt = evsel->core.nr_members;
err = symbol__annotate(ms, evsel, options, parch);
if (err)
goto out_free_offsets;
notes->options = options;
symbol__calc_percent(sym, evsel);
annotation__set_offsets(notes, size);
annotation__mark_jump_targets(notes, sym);
annotation__compute_ipc(notes, size);
annotation__init_column_widths(notes, sym);
notes->nr_events = nr_pcnt;
annotation__update_column_widths(notes);
sym->annotate2 = 1;
return 0;
out_free_offsets:
zfree(¬es->offsets);
return err;
}
static int annotation__config(const char *var, const char *value, void *data)
{
struct annotation_options *opt = data;
if (!strstarts(var, "annotate."))
return 0;
if (!strcmp(var, "annotate.offset_level")) {
perf_config_u8(&opt->offset_level, "offset_level", value);
if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
} else if (!strcmp(var, "annotate.hide_src_code")) {
opt->hide_src_code = perf_config_bool("hide_src_code", value);
} else if (!strcmp(var, "annotate.jump_arrows")) {
opt->jump_arrows = perf_config_bool("jump_arrows", value);
} else if (!strcmp(var, "annotate.show_linenr")) {
opt->show_linenr = perf_config_bool("show_linenr", value);
} else if (!strcmp(var, "annotate.show_nr_jumps")) {
opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
} else if (!strcmp(var, "annotate.show_nr_samples")) {
symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
value);
} else if (!strcmp(var, "annotate.show_total_period")) {
symbol_conf.show_total_period = perf_config_bool("show_total_period",
value);
} else if (!strcmp(var, "annotate.use_offset")) {
opt->use_offset = perf_config_bool("use_offset", value);
} else if (!strcmp(var, "annotate.disassembler_style")) {
opt->disassembler_style = strdup(value);
if (!opt->disassembler_style) {
pr_err("Not enough memory for annotate.disassembler_style\n");
return -1;
}
} else if (!strcmp(var, "annotate.objdump")) {
opt->objdump_path = strdup(value);
if (!opt->objdump_path) {
pr_err("Not enough memory for annotate.objdump\n");
return -1;
}
} else if (!strcmp(var, "annotate.addr2line")) {
symbol_conf.addr2line_path = strdup(value);
if (!symbol_conf.addr2line_path) {
pr_err("Not enough memory for annotate.addr2line\n");
return -1;
}
} else if (!strcmp(var, "annotate.demangle")) {
symbol_conf.demangle = perf_config_bool("demangle", value);
} else if (!strcmp(var, "annotate.demangle_kernel")) {
symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
} else {
pr_debug("%s variable unknown, ignoring...", var);
}
return 0;
}
void annotation_options__init(struct annotation_options *opt)
{
memset(opt, 0, sizeof(*opt));
/* Default values. */
opt->use_offset = true;
opt->jump_arrows = true;
opt->annotate_src = true;
opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
opt->percent_type = PERCENT_PERIOD_LOCAL;
}
void annotation_options__exit(struct annotation_options *opt)
{
zfree(&opt->disassembler_style);
zfree(&opt->objdump_path);
}
void annotation_config__init(struct annotation_options *opt)
{
perf_config(annotation__config, opt);
}
static unsigned int parse_percent_type(char *str1, char *str2)
{
unsigned int type = (unsigned int) -1;
if (!strcmp("period", str1)) {
if (!strcmp("local", str2))
type = PERCENT_PERIOD_LOCAL;
else if (!strcmp("global", str2))
type = PERCENT_PERIOD_GLOBAL;
}
if (!strcmp("hits", str1)) {
if (!strcmp("local", str2))
type = PERCENT_HITS_LOCAL;
else if (!strcmp("global", str2))
type = PERCENT_HITS_GLOBAL;
}
return type;
}
int annotate_parse_percent_type(const struct option *opt, const char *_str,
int unset __maybe_unused)
{
struct annotation_options *opts = opt->value;
unsigned int type;
char *str1, *str2;
int err = -1;
str1 = strdup(_str);
if (!str1)
return -ENOMEM;
str2 = strchr(str1, '-');
if (!str2)
goto out;
*str2++ = 0;
type = parse_percent_type(str1, str2);
if (type == (unsigned int) -1)
type = parse_percent_type(str2, str1);
if (type != (unsigned int) -1) {
opts->percent_type = type;
err = 0;
}
out:
free(str1);
return err;
}
int annotate_check_args(struct annotation_options *args)
{
if (args->prefix_strip && !args->prefix) {
pr_err("--prefix-strip requires --prefix\n");
return -1;
}
return 0;
}
| linux-master | tools/perf/util/annotate.c |
// SPDX-License-Identifier: GPL-2.0
/* For general debugging purposes */
#include <inttypes.h>
#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <api/debug.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <sys/time.h>
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif
#include "color.h"
#include "event.h"
#include "debug.h"
#include "print_binary.h"
#include "target.h"
#include "trace-event.h"
#include "ui/helpline.h"
#include "ui/ui.h"
#include "util/parse-sublevel-options.h"
#include <linux/ctype.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#else
#define LIBTRACEEVENT_VERSION 0
#endif
int verbose;
int debug_peo_args;
bool dump_trace = false, quiet = false;
int debug_ordered_events;
static int redirect_to_stderr;
int debug_data_convert;
static FILE *debug_file;
bool debug_display_time;
void debug_set_file(FILE *file)
{
debug_file = file;
}
void debug_set_display_time(bool set)
{
debug_display_time = set;
}
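/* Optionally prefix debug output with a "[YYYY-MM-DD HH:MM:SS.usecs] " timestamp. */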
static int fprintf_time(FILE *file)
{
struct timeval tod;
struct tm ltime;
char date[64];
if (!debug_display_time)
return 0;
if (gettimeofday(&tod, NULL) != 0)
return 0;
if (localtime_r(&tod.tv_sec, <ime) == NULL)
return 0;
strftime(date, sizeof(date), "%F %H:%M:%S", <ime);
return fprintf(file, "[%s.%06lu] ", date, (long)tod.tv_usec);
}
int veprintf(int level, int var, const char *fmt, va_list args)
{
int ret = 0;
if (var >= level) {
if (use_browser >= 1 && !redirect_to_stderr) {
ui_helpline__vshow(fmt, args);
} else {
ret = fprintf_time(debug_file);
ret += vfprintf(debug_file, fmt, args);
}
}
return ret;
}
int eprintf(int level, int var, const char *fmt, ...)
{
va_list args;
int ret;
va_start(args, fmt);
ret = veprintf(level, var, fmt, args);
va_end(args);
return ret;
}
static int veprintf_time(u64 t, const char *fmt, va_list args)
{
int ret = 0;
u64 secs, usecs, nsecs = t;
secs = nsecs / NSEC_PER_SEC;
nsecs -= secs * NSEC_PER_SEC;
usecs = nsecs / NSEC_PER_USEC;
ret = fprintf(stderr, "[%13" PRIu64 ".%06" PRIu64 "] ",
secs, usecs);
ret += vfprintf(stderr, fmt, args);
return ret;
}
int eprintf_time(int level, int var, u64 t, const char *fmt, ...)
{
int ret = 0;
va_list args;
if (var >= level) {
va_start(args, fmt);
ret = veprintf_time(t, fmt, args);
va_end(args);
}
return ret;
}
/*
 * Overload the libtraceevent standard info print
 * function; its output is displayed with -v in perf.
*/
void pr_stat(const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
veprintf(1, verbose, fmt, args);
va_end(args);
eprintf(1, verbose, "\n");
}
int dump_printf(const char *fmt, ...)
{
va_list args;
int ret = 0;
if (dump_trace) {
va_start(args, fmt);
ret = vprintf(fmt, args);
va_end(args);
}
return ret;
}
static int trace_event_printer(enum binary_printer_ops op,
unsigned int val, void *extra, FILE *fp)
{
const char *color = PERF_COLOR_BLUE;
union perf_event *event = (union perf_event *)extra;
unsigned char ch = (unsigned char)val;
int printed = 0;
switch (op) {
case BINARY_PRINT_DATA_BEGIN:
printed += fprintf(fp, ".");
printed += color_fprintf(fp, color, "\n. ... raw event: size %d bytes\n",
event->header.size);
break;
case BINARY_PRINT_LINE_BEGIN:
printed += fprintf(fp, ".");
break;
case BINARY_PRINT_ADDR:
printed += color_fprintf(fp, color, " %04x: ", val);
break;
case BINARY_PRINT_NUM_DATA:
printed += color_fprintf(fp, color, " %02x", val);
break;
case BINARY_PRINT_NUM_PAD:
printed += color_fprintf(fp, color, " ");
break;
case BINARY_PRINT_SEP:
printed += color_fprintf(fp, color, " ");
break;
case BINARY_PRINT_CHAR_DATA:
printed += color_fprintf(fp, color, "%c",
isprint(ch) && isascii(ch) ? ch : '.');
break;
case BINARY_PRINT_CHAR_PAD:
printed += color_fprintf(fp, color, " ");
break;
case BINARY_PRINT_LINE_END:
printed += color_fprintf(fp, color, "\n");
break;
case BINARY_PRINT_DATA_END:
printed += fprintf(fp, "\n");
break;
default:
break;
}
return printed;
}
void trace_event(union perf_event *event)
{
unsigned char *raw_event = (void *)event;
if (!dump_trace)
return;
print_binary(raw_event, event->header.size, 16,
trace_event_printer, event);
}
static struct sublevel_option debug_opts[] = {
{ .name = "verbose", .value_ptr = &verbose },
{ .name = "ordered-events", .value_ptr = &debug_ordered_events},
{ .name = "stderr", .value_ptr = &redirect_to_stderr},
{ .name = "data-convert", .value_ptr = &debug_data_convert },
{ .name = "perf-event-open", .value_ptr = &debug_peo_args },
{ .name = NULL, }
};
int perf_debug_option(const char *str)
{
int ret;
ret = perf_parse_sublevel_options(str, debug_opts);
if (ret)
return ret;
	/* Allow verbose values only in the range [0, 10]; otherwise set it to 0. */
verbose = (verbose < 0) || (verbose > 10) ? 0 : verbose;
#if LIBTRACEEVENT_VERSION >= MAKE_LIBTRACEEVENT_VERSION(1, 3, 0)
if (verbose == 1)
tep_set_loglevel(TEP_LOG_INFO);
else if (verbose == 2)
tep_set_loglevel(TEP_LOG_DEBUG);
else if (verbose >= 3)
tep_set_loglevel(TEP_LOG_ALL);
#endif
return 0;
}
int perf_quiet_option(void)
{
struct sublevel_option *opt = &debug_opts[0];
/* disable all debug messages */
while (opt->name) {
*opt->value_ptr = -1;
opt++;
}
/* For debug variables that are used as bool types, set to 0. */
redirect_to_stderr = 0;
debug_peo_args = 0;
return 0;
}
#define DEBUG_WRAPPER(__n, __l) \
static int pr_ ## __n ## _wrapper(const char *fmt, ...) \
{ \
va_list args; \
int ret; \
\
va_start(args, fmt); \
ret = veprintf(__l, verbose, fmt, args); \
va_end(args); \
return ret; \
}
DEBUG_WRAPPER(warning, 0);
DEBUG_WRAPPER(debug, 1);
void perf_debug_setup(void)
{
debug_set_file(stderr);
libapi_set_print(pr_warning_wrapper, pr_warning_wrapper, pr_debug_wrapper);
}
/* Obtain a backtrace and print it to stdout. */
#ifdef HAVE_BACKTRACE_SUPPORT
void dump_stack(void)
{
void *array[16];
size_t size = backtrace(array, ARRAY_SIZE(array));
char **strings = backtrace_symbols(array, size);
size_t i;
printf("Obtained %zd stack frames.\n", size);
for (i = 0; i < size; i++)
printf("%s\n", strings[i]);
free(strings);
}
#else
void dump_stack(void) {}
#endif
void sighandler_dump_stack(int sig)
{
psignal(sig, "perf");
dump_stack();
signal(sig, SIG_DFL);
raise(sig);
}
| linux-master | tools/perf/util/debug.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
#include "util/bpf_map.h"
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>
static bool bpf_map__is_per_cpu(enum bpf_map_type type)
{
return type == BPF_MAP_TYPE_PERCPU_HASH ||
type == BPF_MAP_TYPE_PERCPU_ARRAY ||
type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
}
static void *bpf_map__alloc_value(const struct bpf_map *map)
{
if (bpf_map__is_per_cpu(bpf_map__type(map)))
return malloc(round_up(bpf_map__value_size(map), 8) *
sysconf(_SC_NPROCESSORS_CONF));
return malloc(bpf_map__value_size(map));
}
int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
{
void *prev_key = NULL, *key, *value;
int fd = bpf_map__fd(map), err;
int printed = 0;
if (fd < 0)
return fd;
if (!map)
return PTR_ERR(map);
err = -ENOMEM;
key = malloc(bpf_map__key_size(map));
if (key == NULL)
goto out;
value = bpf_map__alloc_value(map);
if (value == NULL)
goto out_free_key;
	while ((err = bpf_map_get_next_key(fd, prev_key, key)) == 0) {
int intkey = *(int *)key;
if (!bpf_map_lookup_elem(fd, key, value)) {
bool boolval = *(bool *)value;
if (boolval)
printed += fprintf(fp, "[%d] = %d,\n", intkey, boolval);
} else {
printed += fprintf(fp, "[%d] = ERROR,\n", intkey);
}
prev_key = key;
}
	if (err == -ENOENT)
err = printed;
free(value);
out_free_key:
free(key);
out:
return err;
}
| linux-master | tools/perf/util/bpf_map.c |
// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "kvm-stat.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
if (len > hists__col_len(hists, col)) {
hists__set_col_len(hists, col, len);
return true;
}
return false;
}
void hists__reset_col_len(struct hists *hists)
{
enum hist_column col;
for (col = 0; col < HISTC_NR_COLS; ++col)
hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
if (hists__col_len(hists, dso) < unresolved_col_width &&
!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
!symbol_conf.dso_list)
hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
int symlen;
u16 len;
if (h->block_info)
return;
/*
* +4 accounts for '[x] ' priv level info
* +2 accounts for 0x prefix on raw addresses
* +3 accounts for ' y ' symtab origin info
*/
if (h->ms.sym) {
symlen = h->ms.sym->namelen + 4;
if (verbose > 0)
symlen += BITS_PER_LONG / 4 + 2 + 3;
hists__new_col_len(hists, HISTC_SYMBOL, symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_SYMBOL, symlen);
hists__set_unres_dso_col_len(hists, HISTC_DSO);
}
len = thread__comm_len(h->thread);
if (hists__new_col_len(hists, HISTC_COMM, len))
hists__set_col_len(hists, HISTC_THREAD, len + 8);
if (h->ms.map) {
len = dso__name_len(map__dso(h->ms.map));
hists__new_col_len(hists, HISTC_DSO, len);
}
if (h->parent)
hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
if (h->branch_info) {
if (h->branch_info->from.ms.sym) {
symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
if (verbose > 0)
symlen += BITS_PER_LONG / 4 + 2 + 3;
hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
}
if (h->branch_info->to.ms.sym) {
symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
if (verbose > 0)
symlen += BITS_PER_LONG / 4 + 2 + 3;
hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
hists__new_col_len(hists, HISTC_DSO_TO, symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
}
if (h->branch_info->srcline_from)
hists__new_col_len(hists, HISTC_SRCLINE_FROM,
strlen(h->branch_info->srcline_from));
if (h->branch_info->srcline_to)
hists__new_col_len(hists, HISTC_SRCLINE_TO,
strlen(h->branch_info->srcline_to));
}
if (h->mem_info) {
if (h->mem_info->daddr.ms.sym) {
symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
+ unresolved_col_width + 2;
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
symlen);
hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
symlen + 1);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
symlen);
hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
symlen);
}
if (h->mem_info->iaddr.ms.sym) {
symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
+ unresolved_col_width + 2;
hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
symlen);
}
if (h->mem_info->daddr.ms.map) {
symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
}
hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
unresolved_col_width + 4 + 2);
hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
unresolved_col_width + 4 + 2);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
}
hists__new_col_len(hists, HISTC_CGROUP, 6);
hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
hists__new_col_len(hists, HISTC_CPU, 3);
hists__new_col_len(hists, HISTC_SOCKET, 6);
hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
hists__new_col_len(hists, HISTC_MEM_TLB, 22);
hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);
if (symbol_conf.nanosecs)
hists__new_col_len(hists, HISTC_TIME, 16);
else
hists__new_col_len(hists, HISTC_TIME, 12);
hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
if (h->srcline) {
len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
hists__new_col_len(hists, HISTC_SRCLINE, len);
}
if (h->srcfile)
hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
if (h->transaction)
hists__new_col_len(hists, HISTC_TRANSACTION,
hist_entry__transaction_len());
if (h->trace_output)
hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
if (h->cgroup) {
const char *cgrp_name = "unknown";
struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
h->cgroup);
if (cgrp != NULL)
cgrp_name = cgrp->name;
hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
}
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
int row = 0;
hists__reset_col_len(hists);
while (next && row++ < max_rows) {
n = rb_entry(next, struct hist_entry, rb_node);
if (!n->filtered)
hists__calc_col_len(hists, n);
next = rb_next(&n->rb_node);
}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
unsigned int cpumode, u64 period)
{
switch (cpumode) {
case PERF_RECORD_MISC_KERNEL:
he_stat->period_sys += period;
break;
case PERF_RECORD_MISC_USER:
he_stat->period_us += period;
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
he_stat->period_guest_sys += period;
break;
case PERF_RECORD_MISC_GUEST_USER:
he_stat->period_guest_us += period;
break;
default:
break;
}
}
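/* Round a timestamp down to the configured time quantum, if any. */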
static long hist_time(unsigned long htime)
{
unsigned long time_quantum = symbol_conf.time_quantum;
if (time_quantum)
return (htime / time_quantum) * time_quantum;
return htime;
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
he_stat->period += period;
he_stat->nr_events += 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
dest->period += src->period;
dest->period_sys += src->period_sys;
dest->period_us += src->period_us;
dest->period_guest_sys += src->period_guest_sys;
dest->period_guest_us += src->period_guest_us;
dest->nr_events += src->nr_events;
}
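/* Exponentially decay the period and number of events by a factor of 7/8. */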
static void he_stat__decay(struct he_stat *he_stat)
{
he_stat->period = (he_stat->period * 7) / 8;
he_stat->nr_events = (he_stat->nr_events * 7) / 8;
/* XXX need decay for weight too? */
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
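/*
 * Decay an entry and, in hierarchy mode, its children; returns true when the
 * period has decayed to zero and the entry can be removed.
 */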
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
u64 prev_period = he->stat.period;
u64 diff;
if (prev_period == 0)
return true;
he_stat__decay(&he->stat);
if (symbol_conf.cumulate_callchain)
he_stat__decay(he->stat_acc);
decay_callchain(he->callchain);
diff = prev_period - he->stat.period;
if (!he->depth) {
hists->stats.total_period -= diff;
if (!he->filtered)
hists->stats.total_non_filtered_period -= diff;
}
if (!he->leaf) {
struct hist_entry *child;
struct rb_node *node = rb_first_cached(&he->hroot_out);
while (node) {
child = rb_entry(node, struct hist_entry, rb_node);
node = rb_next(node);
if (hists__decay_entry(hists, child))
hists__delete_entry(hists, child);
}
}
return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
struct rb_root_cached *root_in;
struct rb_root_cached *root_out;
if (he->parent_he) {
root_in = &he->parent_he->hroot_in;
root_out = &he->parent_he->hroot_out;
} else {
if (hists__has(hists, need_collapse))
root_in = &hists->entries_collapsed;
else
root_in = hists->entries_in;
root_out = &hists->entries;
}
rb_erase_cached(&he->rb_node_in, root_in);
rb_erase_cached(&he->rb_node, root_out);
--hists->nr_entries;
if (!he->filtered)
--hists->nr_non_filtered_entries;
hist_entry__delete(he);
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
if (((zap_user && n->level == '.') ||
(zap_kernel && n->level != '.') ||
hists__decay_entry(hists, n))) {
hists__delete_entry(hists, n);
}
}
}
void hists__delete_entries(struct hists *hists)
{
struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
hists__delete_entry(hists, n);
}
}
struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
int i = 0;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
if (i == idx)
return n;
next = rb_next(&n->rb_node);
i++;
}
return NULL;
}
/*
* histogram, sorted on item, collects periods
*/
static int hist_entry__init(struct hist_entry *he,
struct hist_entry *template,
bool sample_self,
size_t callchain_size)
{
*he = *template;
he->callchain_size = callchain_size;
if (symbol_conf.cumulate_callchain) {
he->stat_acc = malloc(sizeof(he->stat));
if (he->stat_acc == NULL)
return -ENOMEM;
memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
if (!sample_self)
memset(&he->stat, 0, sizeof(he->stat));
}
he->ms.maps = maps__get(he->ms.maps);
he->ms.map = map__get(he->ms.map);
if (he->branch_info) {
/*
		 * This branch info is (part of) an allocation from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries, so we need to save a copy.
*/
he->branch_info = malloc(sizeof(*he->branch_info));
if (he->branch_info == NULL)
goto err;
memcpy(he->branch_info, template->branch_info,
sizeof(*he->branch_info));
he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
}
if (he->mem_info) {
he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map);
he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map);
}
if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
callchain_init(he->callchain);
if (he->raw_data) {
he->raw_data = memdup(he->raw_data, he->raw_size);
if (he->raw_data == NULL)
goto err_infos;
}
if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
he->srcline = strdup(he->srcline);
if (he->srcline == NULL)
goto err_rawdata;
}
if (symbol_conf.res_sample) {
he->res_samples = calloc(sizeof(struct res_sample),
symbol_conf.res_sample);
if (!he->res_samples)
goto err_srcline;
}
INIT_LIST_HEAD(&he->pairs.node);
he->thread = thread__get(he->thread);
he->hroot_in = RB_ROOT_CACHED;
he->hroot_out = RB_ROOT_CACHED;
if (!symbol_conf.report_hierarchy)
he->leaf = true;
return 0;
err_srcline:
zfree(&he->srcline);
err_rawdata:
zfree(&he->raw_data);
err_infos:
if (he->branch_info) {
map__put(he->branch_info->from.ms.map);
map__put(he->branch_info->to.ms.map);
zfree(&he->branch_info);
}
if (he->mem_info) {
map__put(he->mem_info->iaddr.ms.map);
map__put(he->mem_info->daddr.ms.map);
}
err:
maps__zput(he->ms.maps);
map__zput(he->ms.map);
zfree(&he->stat_acc);
return -ENOMEM;
}
static void *hist_entry__zalloc(size_t size)
{
return zalloc(size + sizeof(struct hist_entry));
}
static void hist_entry__free(void *ptr)
{
free(ptr);
}
static struct hist_entry_ops default_ops = {
.new = hist_entry__zalloc,
.free = hist_entry__free,
};
static struct hist_entry *hist_entry__new(struct hist_entry *template,
bool sample_self)
{
struct hist_entry_ops *ops = template->ops;
size_t callchain_size = 0;
struct hist_entry *he;
int err = 0;
if (!ops)
ops = template->ops = &default_ops;
if (symbol_conf.use_callchain)
callchain_size = sizeof(struct callchain_root);
he = ops->new(callchain_size);
if (he) {
err = hist_entry__init(he, template, sample_self, callchain_size);
if (err) {
ops->free(he);
he = NULL;
}
}
return he;
}
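/* Mark entries with no resolved parent symbol as filtered when symbol_conf.exclude_other is set. */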
static u8 symbol__parent_filter(const struct symbol *parent)
{
if (symbol_conf.exclude_other && parent == NULL)
return 1 << HIST_FILTER__PARENT;
return 0;
}
static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
return;
he->hists->callchain_period += period;
if (!he->filtered)
he->hists->callchain_non_filtered_period += period;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
struct hist_entry *entry,
const struct addr_location *al,
bool sample_self)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
int64_t cmp;
u64 period = entry->stat.period;
bool leftmost = true;
p = &hists->entries_in->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
he = rb_entry(parent, struct hist_entry, rb_node_in);
/*
		 * Make sure that it receives arguments in the same order as
* hist_entry__collapse() so that we can use an appropriate
* function when searching an entry regardless which sort
* keys were used.
*/
cmp = hist_entry__cmp(he, entry);
if (!cmp) {
if (sample_self) {
he_stat__add_period(&he->stat, period);
hist_entry__add_callchain_period(he, period);
}
if (symbol_conf.cumulate_callchain)
he_stat__add_period(he->stat_acc, period);
/*
* This mem info was allocated from sample__resolve_mem
* and will not be used anymore.
*/
mem_info__zput(entry->mem_info);
block_info__zput(entry->block_info);
kvm_info__zput(entry->kvm_info);
/* If the map of an existing hist_entry has
* become out-of-date due to an exec() or
* similar, update it. Otherwise we will
* mis-adjust symbol addresses when computing
* the history counter to increment.
*/
if (he->ms.map != entry->ms.map) {
map__put(he->ms.map);
he->ms.map = map__get(entry->ms.map);
}
goto out;
}
if (cmp < 0)
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
he = hist_entry__new(entry, sample_self);
if (!he)
return NULL;
if (sample_self)
hist_entry__add_callchain_period(he, period);
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
if (sample_self)
he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
if (symbol_conf.cumulate_callchain)
he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
return he;
}
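/*
 * Return a pseudo-random number in [0, high), using rejection sampling to
 * avoid the modulo bias of a plain random() % high.
 */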
static unsigned random_max(unsigned high)
{
unsigned thresh = -high % high;
for (;;) {
unsigned r = random();
if (r >= thresh)
return r % high;
}
}
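/*
 * Remember up to symbol_conf.res_sample samples per hist entry; once the
 * array is full, each new sample overwrites a randomly chosen slot.
 */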
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
struct res_sample *r;
int j;
if (he->num_res < symbol_conf.res_sample) {
j = he->num_res++;
} else {
j = random_max(symbol_conf.res_sample);
}
r = &he->res_samples[j];
r->time = sample->time;
r->cpu = sample->cpu;
r->tid = sample->tid;
}
static struct hist_entry*
__hists__add_entry(struct hists *hists,
struct addr_location *al,
struct symbol *sym_parent,
struct branch_info *bi,
struct mem_info *mi,
struct kvm_info *ki,
struct block_info *block_info,
struct perf_sample *sample,
bool sample_self,
struct hist_entry_ops *ops)
{
struct namespaces *ns = thread__namespaces(al->thread);
struct hist_entry entry = {
.thread = al->thread,
.comm = thread__comm(al->thread),
.cgroup_id = {
.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
},
.cgroup = sample->cgroup,
.ms = {
.maps = al->maps,
.map = al->map,
.sym = al->sym,
},
.srcline = (char *) al->srcline,
.socket = al->socket,
.cpu = al->cpu,
.cpumode = al->cpumode,
.ip = al->addr,
.level = al->level,
.code_page_size = sample->code_page_size,
.stat = {
.nr_events = 1,
.period = sample->period,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent) | al->filtered,
.hists = hists,
.branch_info = bi,
.mem_info = mi,
.kvm_info = ki,
.block_info = block_info,
.transaction = sample->transaction,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
.ops = ops,
.time = hist_time(sample->time),
.weight = sample->weight,
.ins_lat = sample->ins_lat,
.p_stage_cyc = sample->p_stage_cyc,
.simd_flags = sample->simd_flags,
}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
if (!hists->has_callchains && he && he->callchain_size != 0)
hists->has_callchains = true;
if (he && symbol_conf.res_sample)
hists__res_sample(he, sample);
return he;
}
struct hist_entry *hists__add_entry(struct hists *hists,
struct addr_location *al,
struct symbol *sym_parent,
struct branch_info *bi,
struct mem_info *mi,
struct kvm_info *ki,
struct perf_sample *sample,
bool sample_self)
{
return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
sample, sample_self, NULL);
}
struct hist_entry *hists__add_entry_ops(struct hists *hists,
struct hist_entry_ops *ops,
struct addr_location *al,
struct symbol *sym_parent,
struct branch_info *bi,
struct mem_info *mi,
struct kvm_info *ki,
struct perf_sample *sample,
bool sample_self)
{
return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
sample, sample_self, ops);
}
struct hist_entry *hists__add_entry_block(struct hists *hists,
struct addr_location *al,
struct block_info *block_info)
{
struct hist_entry entry = {
.block_info = block_info,
.hists = hists,
.ms = {
.maps = al->maps,
.map = al->map,
.sym = al->sym,
},
}, *he = hists__findnew_entry(hists, &entry, al, false);
return he;
}
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
struct addr_location *al __maybe_unused)
{
return 0;
}
static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
struct addr_location *al __maybe_unused)
{
return 0;
}
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
struct perf_sample *sample = iter->sample;
struct mem_info *mi;
mi = sample__resolve_mem(sample, al);
if (mi == NULL)
return -ENOMEM;
iter->priv = mi;
return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
u64 cost;
struct mem_info *mi = iter->priv;
struct hists *hists = evsel__hists(iter->evsel);
struct perf_sample *sample = iter->sample;
struct hist_entry *he;
if (mi == NULL)
return -EINVAL;
cost = sample->weight;
if (!cost)
cost = 1;
/*
* must pass period=weight in order to get the correct
* sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting to be done on nr_events * weight
* and this is indirectly achieved by passing period=weight here
* and the he_stat__add_period() function.
*/
sample->period = cost;
he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
sample, true);
if (!he)
return -ENOMEM;
iter->he = he;
return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused)
{
struct evsel *evsel = iter->evsel;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he = iter->he;
int err = -EINVAL;
if (he == NULL)
goto out;
hists__inc_nr_samples(hists, he->filtered);
err = hist_entry__append_callchain(he, iter->sample);
out:
/*
* We don't need to free iter->priv (mem_info) here since the mem info
* was either already freed in hists__findnew_entry() or passed to a
* new hist entry by hist_entry__new().
*/
iter->priv = NULL;
iter->he = NULL;
return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
struct branch_info *bi;
struct perf_sample *sample = iter->sample;
bi = sample__resolve_bstack(sample, al);
if (!bi)
return -ENOMEM;
iter->curr = 0;
iter->total = sample->branch_stack->nr;
iter->priv = bi;
return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
struct addr_location *al __maybe_unused)
{
return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
struct branch_info *bi = iter->priv;
int i = iter->curr;
if (bi == NULL)
return 0;
if (iter->curr >= iter->total)
return 0;
maps__put(al->maps);
al->maps = maps__get(bi[i].to.ms.maps);
map__put(al->map);
al->map = map__get(bi[i].to.ms.map);
al->sym = bi[i].to.ms.sym;
al->addr = bi[i].to.addr;
return 1;
}
static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
struct branch_info *bi;
struct evsel *evsel = iter->evsel;
struct hists *hists = evsel__hists(evsel);
struct perf_sample *sample = iter->sample;
struct hist_entry *he = NULL;
int i = iter->curr;
int err = 0;
bi = iter->priv;
if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
goto out;
/*
* The report shows the percentage of total branches captured
* and not events sampled. Thus we use a pseudo period of 1.
*/
sample->period = 1;
sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
sample, true);
if (he == NULL)
return -ENOMEM;
hists__inc_nr_samples(hists, he->filtered);
out:
iter->he = he;
iter->curr++;
return err;
}
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused)
{
zfree(&iter->priv);
iter->he = NULL;
return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
struct addr_location *al __maybe_unused)
{
return 0;
}
static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
struct evsel *evsel = iter->evsel;
struct perf_sample *sample = iter->sample;
struct hist_entry *he;
he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
NULL, sample, true);
if (he == NULL)
return -ENOMEM;
iter->he = he;
return 0;
}
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused)
{
struct hist_entry *he = iter->he;
struct evsel *evsel = iter->evsel;
struct perf_sample *sample = iter->sample;
if (he == NULL)
return 0;
iter->he = NULL;
hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused)
{
struct hist_entry **he_cache;
struct callchain_cursor *cursor = get_tls_callchain_cursor();
if (cursor == NULL)
return -ENOMEM;
callchain_cursor_commit(cursor);
/*
* This is for detecting cycles or recursions so that they're
	 * cumulated only once to prevent entries from exceeding 100%
	 * overhead.
*/
he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
if (he_cache == NULL)
return -ENOMEM;
iter->priv = he_cache;
iter->curr = 0;
return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
struct addr_location *al)
{
struct evsel *evsel = iter->evsel;
struct hists *hists = evsel__hists(evsel);
struct perf_sample *sample = iter->sample;
struct hist_entry **he_cache = iter->priv;
struct hist_entry *he;
int err = 0;
he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
sample, true);
if (he == NULL)
return -ENOMEM;
iter->he = he;
he_cache[iter->curr++] = he;
hist_entry__append_callchain(he, sample);
/*
* We need to re-initialize the cursor since callchain_append()
* advanced the cursor to the end.
*/
callchain_cursor_commit(get_tls_callchain_cursor());
hists__inc_nr_samples(hists, he->filtered);
return err;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
struct addr_location *al)
{
struct callchain_cursor_node *node;
node = callchain_cursor_current(get_tls_callchain_cursor());
if (node == NULL)
return 0;
return fill_callchain_info(al, node, iter->hide_unresolved);
}
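/*
 * Quick check whether two entries resolve to different symbols; when neither
 * side has a symbol, fall back to comparing the raw IPs.
 */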
static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
struct hist_entry *right)
{
struct symbol *sym_l = left->ms.sym;
struct symbol *sym_r = right->ms.sym;
if (!sym_l && !sym_r)
return left->ip != right->ip;
return !!_sort__sym_cmp(sym_l, sym_r);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
struct addr_location *al)
{
struct evsel *evsel = iter->evsel;
struct perf_sample *sample = iter->sample;
struct hist_entry **he_cache = iter->priv;
struct hist_entry *he;
struct hist_entry he_tmp = {
.hists = evsel__hists(evsel),
.cpu = al->cpu,
.thread = al->thread,
.comm = thread__comm(al->thread),
.ip = al->addr,
.ms = {
.maps = al->maps,
.map = al->map,
.sym = al->sym,
},
.srcline = (char *) al->srcline,
.parent = iter->parent,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
};
int i;
struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
bool fast = hists__has(he_tmp.hists, sym);
if (tls_cursor == NULL)
return -ENOMEM;
callchain_cursor_snapshot(&cursor, tls_cursor);
callchain_cursor_advance(tls_cursor);
/*
	 * Check if there are duplicate entries in the callchain.
* It's possible that it has cycles or recursive calls.
*/
for (i = 0; i < iter->curr; i++) {
/*
		 * In most cases, there are no duplicate entries in the callchain.
* The symbols are usually different. Do a quick check for
* symbols first.
*/
if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
continue;
if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
/* to avoid calling callback function */
iter->he = NULL;
return 0;
}
}
he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
NULL, sample, false);
if (he == NULL)
return -ENOMEM;
iter->he = he;
he_cache[iter->curr++] = he;
if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
callchain_append(he->callchain, &cursor, sample->period);
return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused)
{
zfree(&iter->priv);
iter->he = NULL;
return 0;
}
const struct hist_iter_ops hist_iter_mem = {
.prepare_entry = iter_prepare_mem_entry,
.add_single_entry = iter_add_single_mem_entry,
.next_entry = iter_next_nop_entry,
.add_next_entry = iter_add_next_nop_entry,
.finish_entry = iter_finish_mem_entry,
};
const struct hist_iter_ops hist_iter_branch = {
.prepare_entry = iter_prepare_branch_entry,
.add_single_entry = iter_add_single_branch_entry,
.next_entry = iter_next_branch_entry,
.add_next_entry = iter_add_next_branch_entry,
.finish_entry = iter_finish_branch_entry,
};
const struct hist_iter_ops hist_iter_normal = {
.prepare_entry = iter_prepare_normal_entry,
.add_single_entry = iter_add_single_normal_entry,
.next_entry = iter_next_nop_entry,
.add_next_entry = iter_add_next_nop_entry,
.finish_entry = iter_finish_normal_entry,
};
const struct hist_iter_ops hist_iter_cumulative = {
.prepare_entry = iter_prepare_cumulative_entry,
.add_single_entry = iter_add_single_cumulative_entry,
.next_entry = iter_next_cumulative_entry,
.add_next_entry = iter_add_next_cumulative_entry,
.finish_entry = iter_finish_cumulative_entry,
};
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
int max_stack_depth, void *arg)
{
int err, err2;
struct map *alm = NULL;
if (al)
alm = map__get(al->map);
err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
iter->evsel, al, max_stack_depth);
if (err) {
map__put(alm);
return err;
}
err = iter->ops->prepare_entry(iter, al);
if (err)
goto out;
err = iter->ops->add_single_entry(iter, al);
if (err)
goto out;
if (iter->he && iter->add_entry_cb) {
err = iter->add_entry_cb(iter, al, true, arg);
if (err)
goto out;
}
while (iter->ops->next_entry(iter, al)) {
err = iter->ops->add_next_entry(iter, al);
if (err)
break;
if (iter->he && iter->add_entry_cb) {
err = iter->add_entry_cb(iter, al, false, arg);
if (err)
goto out;
}
}
out:
err2 = iter->ops->finish_entry(iter, al);
if (!err)
err = err2;
map__put(alm);
return err;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
struct hists *hists = left->hists;
struct perf_hpp_fmt *fmt;
int64_t cmp = 0;
hists__for_each_sort_list(hists, fmt) {
if (perf_hpp__is_dynamic_entry(fmt) &&
!perf_hpp__defined_dynamic_entry(fmt, hists))
continue;
cmp = fmt->cmp(fmt, left, right);
if (cmp)
break;
}
return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
struct hists *hists = left->hists;
struct perf_hpp_fmt *fmt;
int64_t cmp = 0;
hists__for_each_sort_list(hists, fmt) {
if (perf_hpp__is_dynamic_entry(fmt) &&
!perf_hpp__defined_dynamic_entry(fmt, hists))
continue;
cmp = fmt->collapse(fmt, left, right);
if (cmp)
break;
}
return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
struct hist_entry_ops *ops = he->ops;
thread__zput(he->thread);
maps__zput(he->ms.maps);
map__zput(he->ms.map);
if (he->branch_info) {
map__zput(he->branch_info->from.ms.map);
map__zput(he->branch_info->to.ms.map);
zfree_srcline(&he->branch_info->srcline_from);
zfree_srcline(&he->branch_info->srcline_to);
zfree(&he->branch_info);
}
if (he->mem_info) {
map__zput(he->mem_info->iaddr.ms.map);
map__zput(he->mem_info->daddr.ms.map);
mem_info__zput(he->mem_info);
}
if (he->block_info)
block_info__zput(he->block_info);
if (he->kvm_info)
kvm_info__zput(he->kvm_info);
zfree(&he->res_samples);
zfree(&he->stat_acc);
zfree_srcline(&he->srcline);
if (he->srcfile && he->srcfile[0])
zfree(&he->srcfile);
free_callchain(he->callchain);
zfree(&he->trace_output);
zfree(&he->raw_data);
ops->free(he);
}
/*
* If this is not the last column, then we need to pad it according to the
* pre-calculated max length for this column, otherwise don't bother adding
* spaces because that would break viewing this with, for instance, 'less',
* that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
*/
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
struct perf_hpp_fmt *fmt, int printed)
{
if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
const int width = fmt->width(fmt, hpp, he->hists);
if (printed < width) {
advance_hpp(hpp, printed);
printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
}
}
return printed;
}
/*
* collapse the histogram
*/
static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
enum hist_filter type);
typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}
static void hist_entry__check_and_remove_filter(struct hist_entry *he,
enum hist_filter type,
fmt_chk_fn check)
{
struct perf_hpp_fmt *fmt;
bool type_match = false;
struct hist_entry *parent = he->parent_he;
switch (type) {
case HIST_FILTER__THREAD:
if (symbol_conf.comm_list == NULL &&
symbol_conf.pid_list == NULL &&
symbol_conf.tid_list == NULL)
return;
break;
case HIST_FILTER__DSO:
if (symbol_conf.dso_list == NULL)
return;
break;
case HIST_FILTER__SYMBOL:
if (symbol_conf.sym_list == NULL)
return;
break;
case HIST_FILTER__PARENT:
case HIST_FILTER__GUEST:
case HIST_FILTER__HOST:
case HIST_FILTER__SOCKET:
case HIST_FILTER__C2C:
default:
return;
}
	/* if it's filtered by its own fmt, it has to have filter bits */
perf_hpp_list__for_each_format(he->hpp_list, fmt) {
if (check(fmt)) {
type_match = true;
break;
}
}
if (type_match) {
/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to parents. The marker bit was
* already set by default so it only needs to clear
* non-filtered entries.
*/
if (!(he->filtered & (1 << type))) {
while (parent) {
parent->filtered &= ~(1 << type);
parent = parent->parent_he;
}
}
} else {
/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries. It will be
		 * cleared if its lower level entries are not filtered.
*
* For lower-level entries, it inherits parent's
* filter bit so that lower level entries of a
* non-filtered entry won't set the filter marker.
*/
if (parent == NULL)
he->filtered |= (1 << type);
else
he->filtered |= (parent->filtered & (1 << type));
}
}
static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
check_thread_entry);
hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
perf_hpp__is_dso_entry);
hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
perf_hpp__is_sym_entry);
hists__apply_filters(he->hists, he);
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
struct rb_root_cached *root,
struct hist_entry *he,
struct hist_entry *parent_he,
struct perf_hpp_list *hpp_list)
{
struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter, *new;
struct perf_hpp_fmt *fmt;
int64_t cmp;
bool leftmost = true;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node_in);
cmp = 0;
perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
cmp = fmt->collapse(fmt, iter, he);
if (cmp)
break;
}
if (!cmp) {
he_stat__add_stat(&iter->stat, &he->stat);
return iter;
}
if (cmp < 0)
p = &parent->rb_left;
else {
p = &parent->rb_right;
leftmost = false;
}
}
new = hist_entry__new(he, true);
if (new == NULL)
return NULL;
hists->nr_entries++;
/* save related format list for output */
new->hpp_list = hpp_list;
new->parent_he = parent_he;
hist_entry__apply_hierarchy_filters(new);
/* some fields are now passed to 'new' */
perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
he->trace_output = NULL;
else
new->trace_output = NULL;
if (perf_hpp__is_srcline_entry(fmt))
he->srcline = NULL;
else
new->srcline = NULL;
if (perf_hpp__is_srcfile_entry(fmt))
he->srcfile = NULL;
else
new->srcfile = NULL;
}
rb_link_node(&new->rb_node_in, parent, p);
rb_insert_color_cached(&new->rb_node_in, root, leftmost);
return new;
}
static int hists__hierarchy_insert_entry(struct hists *hists,
struct rb_root_cached *root,
struct hist_entry *he)
{
struct perf_hpp_list_node *node;
struct hist_entry *new_he = NULL;
struct hist_entry *parent = NULL;
int depth = 0;
int ret = 0;
list_for_each_entry(node, &hists->hpp_formats, list) {
/* skip period (overhead) and elided columns */
if (node->level == 0 || node->skip)
continue;
/* insert copy of 'he' for each fmt into the hierarchy */
new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
if (new_he == NULL) {
ret = -1;
break;
}
root = &new_he->hroot_in;
new_he->depth = depth++;
parent = new_he;
}
if (new_he) {
new_he->leaf = true;
if (hist_entry__has_callchains(new_he) &&
symbol_conf.use_callchain) {
struct callchain_cursor *cursor = get_tls_callchain_cursor();
if (cursor == NULL)
return -1;
callchain_cursor_reset(cursor);
if (callchain_merge(cursor,
new_he->callchain,
he->callchain) < 0)
ret = -1;
}
}
/* 'he' is no longer used */
hist_entry__delete(he);
/* return 0 (or -1) since it already applied filters */
return ret;
}
static int hists__collapse_insert_entry(struct hists *hists,
struct rb_root_cached *root,
struct hist_entry *he)
{
struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
int64_t cmp;
bool leftmost = true;
if (symbol_conf.report_hierarchy)
return hists__hierarchy_insert_entry(hists, root, he);
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node_in);
cmp = hist_entry__collapse(iter, he);
if (!cmp) {
int ret = 0;
he_stat__add_stat(&iter->stat, &he->stat);
if (symbol_conf.cumulate_callchain)
he_stat__add_stat(iter->stat_acc, he->stat_acc);
if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
struct callchain_cursor *cursor = get_tls_callchain_cursor();
if (cursor != NULL) {
callchain_cursor_reset(cursor);
if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
ret = -1;
} else {
ret = 0;
}
}
hist_entry__delete(he);
return ret;
}
if (cmp < 0)
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color_cached(&he->rb_node_in, root, leftmost);
return 1;
}
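/*
 * Return the current input tree and rotate hists->entries_in to the other
 * element of the two-entry array, so new entries go to a fresh tree while
 * this one is being collapsed.
 */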
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
struct rb_root_cached *root;
mutex_lock(&hists->lock);
root = hists->entries_in;
if (++hists->entries_in > &hists->entries_in_array[1])
hists->entries_in = &hists->entries_in_array[0];
mutex_unlock(&hists->lock);
return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
hists__filter_entry_by_dso(hists, he);
hists__filter_entry_by_thread(hists, he);
hists__filter_entry_by_symbol(hists, he);
hists__filter_entry_by_socket(hists, he);
}
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
int ret;
if (!hists__has(hists, need_collapse))
return 0;
hists->nr_entries = 0;
root = hists__get_rotate_entries_in(hists);
next = rb_first_cached(root);
while (next) {
if (session_done())
break;
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
rb_erase_cached(&n->rb_node_in, root);
ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
if (ret < 0)
return -1;
if (ret) {
/*
* If it wasn't combined with one of the entries already
* collapsed, we need to apply the filters that may have
* been set by, say, the hist_browser.
*/
hists__apply_filters(hists, n);
}
if (prog)
ui_progress__update(prog, 1);
}
return 0;
}
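/* Compare two entries with the output sort list; the first non-zero result wins. */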
static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
struct hists *hists = a->hists;
struct perf_hpp_fmt *fmt;
int64_t cmp = 0;
hists__for_each_sort_list(hists, fmt) {
if (perf_hpp__should_skip(fmt, a->hists))
continue;
cmp = fmt->sort(fmt, a, b);
if (cmp)
break;
}
return cmp;
}
static void hists__reset_filter_stats(struct hists *hists)
{
hists->nr_non_filtered_entries = 0;
hists->stats.total_non_filtered_period = 0;
}
void hists__reset_stats(struct hists *hists)
{
hists->nr_entries = 0;
hists->stats.total_period = 0;
hists__reset_filter_stats(hists);
}
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
hists->nr_non_filtered_entries++;
hists->stats.total_non_filtered_period += h->stat.period;
}
void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
if (!h->filtered)
hists__inc_filter_stats(hists, h);
hists->nr_entries++;
hists->stats.total_period += h->stat.period;
}
static void hierarchy_recalc_total_periods(struct hists *hists)
{
struct rb_node *node;
struct hist_entry *he;
node = rb_first_cached(&hists->entries);
hists->stats.total_period = 0;
hists->stats.total_non_filtered_period = 0;
/*
* Recalculate the total period using top-level entries only,
* since lower-level entries only see non-filtered entries
* while upper-level entries hold the sum of both.
*/
while (node) {
he = rb_entry(node, struct hist_entry, rb_node);
node = rb_next(node);
hists->stats.total_period += he->stat.period;
if (!he->filtered)
hists->stats.total_non_filtered_period += he->stat.period;
}
}
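/*
 * Insert 'he' into an output tree ordered by hist_entry__sort() and let each
 * format of its level initialize the new entry (e.g. column widths).
 */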
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
bool leftmost = true;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
if (hist_entry__sort(he, iter) > 0)
p = &parent->rb_left;
else {
p = &parent->rb_right;
leftmost = false;
}
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color_cached(&he->rb_node, root, leftmost);
/* update column width of dynamic entry */
perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
if (fmt->init)
fmt->init(fmt, he);
}
}
static void hists__hierarchy_output_resort(struct hists *hists,
struct ui_progress *prog,
struct rb_root_cached *root_in,
struct rb_root_cached *root_out,
u64 min_callchain_hits,
bool use_callchain)
{
struct rb_node *node;
struct hist_entry *he;
*root_out = RB_ROOT_CACHED;
node = rb_first_cached(root_in);
while (node) {
he = rb_entry(node, struct hist_entry, rb_node_in);
node = rb_next(node);
hierarchy_insert_output_entry(root_out, he);
if (prog)
ui_progress__update(prog, 1);
hists->nr_entries++;
if (!he->filtered) {
hists->nr_non_filtered_entries++;
hists__calc_col_len(hists, he);
}
if (!he->leaf) {
hists__hierarchy_output_resort(hists, prog,
&he->hroot_in,
&he->hroot_out,
min_callchain_hits,
use_callchain);
continue;
}
if (!use_callchain)
continue;
if (callchain_param.mode == CHAIN_GRAPH_REL) {
u64 total = he->stat.period;
if (symbol_conf.cumulate_callchain)
total = he->stat_acc->period;
min_callchain_hits = total * (callchain_param.min_percent / 100);
}
callchain_param.sort(&he->sorted_chain, he->callchain,
min_callchain_hits, &callchain_param);
}
}
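/*
 * Resolve the callchain (when requested) and insert 'he' into the flat
 * output tree in sort order.
 */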
static void __hists__insert_output_entry(struct rb_root_cached *entries,
struct hist_entry *he,
u64 min_callchain_hits,
bool use_callchain)
{
struct rb_node **p = &entries->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
bool leftmost = true;
if (use_callchain) {
if (callchain_param.mode == CHAIN_GRAPH_REL) {
u64 total = he->stat.period;
if (symbol_conf.cumulate_callchain)
total = he->stat_acc->period;
min_callchain_hits = total * (callchain_param.min_percent / 100);
}
callchain_param.sort(&he->sorted_chain, he->callchain,
min_callchain_hits, &callchain_param);
}
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color_cached(&he->rb_node, entries, leftmost);
/* update column width of dynamic entries */
perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
if (fmt->init)
fmt->init(fmt, he);
}
}
static void output_resort(struct hists *hists, struct ui_progress *prog,
bool use_callchain, hists__resort_cb_t cb,
void *cb_arg)
{
struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
u64 callchain_total;
u64 min_callchain_hits;
callchain_total = hists->callchain_period;
if (symbol_conf.filter_relative)
callchain_total = hists->callchain_non_filtered_period;
min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
hists__reset_stats(hists);
hists__reset_col_len(hists);
if (symbol_conf.report_hierarchy) {
hists__hierarchy_output_resort(hists, prog,
&hists->entries_collapsed,
&hists->entries,
min_callchain_hits,
use_callchain);
hierarchy_recalc_total_periods(hists);
return;
}
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
next = rb_first_cached(root);
hists->entries = RB_ROOT_CACHED;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
if (cb && cb(n, cb_arg))
continue;
__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
hists__inc_stats(hists, n);
if (!n->filtered)
hists__calc_col_len(hists, n);
if (prog)
ui_progress__update(prog, 1);
}
}
void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
hists__resort_cb_t cb, void *cb_arg)
{
bool use_callchain;
if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
use_callchain = evsel__has_callchain(evsel);
else
use_callchain = symbol_conf.use_callchain;
use_callchain |= symbol_conf.show_branchflag_count;
output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}
void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
return evsel__output_resort_cb(evsel, prog, NULL, NULL);
}
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
hists__resort_cb_t cb)
{
output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}
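/*
 * Hierarchy traversal helpers: whether browsing may descend into an entry's
 * children depends on its fold state and the forced move direction.
 */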
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
if (he->leaf || hmd == HMD_FORCE_SIBLING)
return false;
if (he->unfolded || hmd == HMD_FORCE_CHILD)
return true;
return false;
}
struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
while (can_goto_child(he, HMD_NORMAL)) {
node = rb_last(&he->hroot_out.rb_root);
he = rb_entry(node, struct hist_entry, rb_node);
}
return node;
}
struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
if (can_goto_child(he, hmd))
node = rb_first_cached(&he->hroot_out);
else
node = rb_next(node);
while (node == NULL) {
he = he->parent_he;
if (he == NULL)
break;
node = rb_next(&he->rb_node);
}
return node;
}
struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
node = rb_prev(node);
if (node)
return rb_hierarchy_last(node);
he = he->parent_he;
if (he == NULL)
return NULL;
return &he->rb_node;
}
bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
struct rb_node *node;
struct hist_entry *child;
float percent;
if (he->leaf)
return false;
node = rb_first_cached(&he->hroot_out);
child = rb_entry(node, struct hist_entry, rb_node);
while (node && child->filtered) {
node = rb_next(node);
child = rb_entry(node, struct hist_entry, rb_node);
}
if (node)
percent = hist_entry__get_percent_limit(child);
else
percent = 0;
return node && percent >= limit;
}
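/*
 * Clear the given filter bit on the entry and, in hierarchy mode, on its
 * parents (adding the entry's stats back to them). Once fully unfiltered,
 * fold the entry for display and account it in the non-filtered stats.
 */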
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
enum hist_filter filter)
{
h->filtered &= ~(1 << filter);
if (symbol_conf.report_hierarchy) {
struct hist_entry *parent = h->parent_he;
while (parent) {
he_stat__add_stat(&parent->stat, &h->stat);
parent->filtered &= ~(1 << filter);
if (parent->filtered)
goto next;
/* force fold unfiltered entry for simplicity */
parent->unfolded = false;
parent->has_no_entry = false;
parent->row_offset = 0;
parent->nr_rows = 0;
next:
parent = parent->parent_he;
}
}
if (h->filtered)
return;
/* force fold unfiltered entry for simplicity */
h->unfolded = false;
h->has_no_entry = false;
h->row_offset = 0;
h->nr_rows = 0;
hists->stats.nr_non_filtered_samples += h->stat.nr_events;
hists__inc_filter_stats(hists, h);
hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he)
{
if (hists->dso_filter != NULL &&
(he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) {
he->filtered |= (1 << HIST_FILTER__DSO);
return true;
}
return false;
}
static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he)
{
if (hists->thread_filter != NULL &&
RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(hists->thread_filter)) {
he->filtered |= (1 << HIST_FILTER__THREAD);
return true;
}
return false;
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
struct hist_entry *he)
{
if (hists->symbol_filter_str != NULL &&
(!he->ms.sym || strstr(he->ms.sym->name,
hists->symbol_filter_str) == NULL)) {
he->filtered |= (1 << HIST_FILTER__SYMBOL);
return true;
}
return false;
}
static bool hists__filter_entry_by_socket(struct hists *hists,
struct hist_entry *he)
{
if ((hists->socket_filter > -1) &&
(he->socket != hists->socket_filter)) {
he->filtered |= (1 << HIST_FILTER__SOCKET);
return true;
}
return false;
}
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
struct rb_node *nd;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (filter(hists, h))
continue;
hists__remove_entry_filter(hists, h, type);
}
}
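/*
 * Re-insert 'he' into 'root' in output sort order and recursively rebuild
 * the subtrees of unfiltered non-leaf entries.
 */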
static void resort_filtered_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct rb_root_cached new_root = RB_ROOT_CACHED;
struct rb_node *nd;
bool leftmost = true;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color_cached(&he->rb_node, root, leftmost);
if (he->leaf || he->filtered)
return;
nd = rb_first_cached(&he->hroot_out);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
rb_erase_cached(&h->rb_node, &he->hroot_out);
resort_filtered_entry(&new_root, h);
}
he->hroot_out = new_root;
}
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
struct rb_node *nd;
struct rb_root_cached new_root = RB_ROOT_CACHED;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
int ret;
ret = hist_entry__filter(h, type, arg);
/*
* case 1. non-matching type
* zero out the period, set filter marker and move to child
*/
if (ret < 0) {
memset(&h->stat, 0, sizeof(h->stat));
h->filtered |= (1 << type);
nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
}
/*
* case 2. matched type (filter out)
* set filter marker and move to next
*/
else if (ret == 1) {
h->filtered |= (1 << type);
nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
}
/*
* case 3. ok (not filtered)
* add period to hists and parents, erase the filter marker
* and move to next sibling
*/
else {
hists__remove_entry_filter(hists, h, type);
nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
}
}
hierarchy_recalc_total_periods(hists);
/*
* Resort the output after applying a new filter, since a filter at a
* lower hierarchy level can change periods at an upper level.
*/
nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
rb_erase_cached(&h->rb_node, &hists->entries);
resort_filtered_entry(&new_root, h);
}
hists->entries = new_root;
}
void hists__filter_by_thread(struct hists *hists)
{
if (symbol_conf.report_hierarchy)
hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
hists->thread_filter);
else
hists__filter_by_type(hists, HIST_FILTER__THREAD,
hists__filter_entry_by_thread);
}
void hists__filter_by_dso(struct hists *hists)
{
if (symbol_conf.report_hierarchy)
hists__filter_hierarchy(hists, HIST_FILTER__DSO,
hists->dso_filter);
else
hists__filter_by_type(hists, HIST_FILTER__DSO,
hists__filter_entry_by_dso);
}
void hists__filter_by_symbol(struct hists *hists)
{
if (symbol_conf.report_hierarchy)
hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
hists->symbol_filter_str);
else
hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
hists__filter_entry_by_symbol);
}
void hists__filter_by_socket(struct hists *hists)
{
if (symbol_conf.report_hierarchy)
hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
&hists->socket_filter);
else
hists__filter_by_type(hists, HIST_FILTER__SOCKET,
hists__filter_entry_by_socket);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
++stats->nr_events[0];
++stats->nr_events[type];
}
static void hists_stats__inc(struct hists_stats *stats)
{
++stats->nr_samples;
}
void hists__inc_nr_events(struct hists *hists)
{
hists_stats__inc(&hists->stats);
}
void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
hists_stats__inc(&hists->stats);
if (!filtered)
hists->stats.nr_non_filtered_samples++;
}
void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
{
hists->stats.nr_lost_samples += lost;
}
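/*
 * Add a zero-period, zero-count copy of 'pair' to 'hists' (used by
 * hists__link() below); if an entry collapsing to the same key already
 * exists, return it instead.
 */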
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
struct hist_entry *pair)
{
struct rb_root_cached *root;
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
int64_t cmp;
bool leftmost = true;
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
p = &root->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
he = rb_entry(parent, struct hist_entry, rb_node_in);
cmp = hist_entry__collapse(he, pair);
if (!cmp)
goto out;
if (cmp < 0)
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
he = hist_entry__new(pair, true);
if (he) {
memset(&he->stat, 0, sizeof(he->stat));
he->hists = hists;
if (symbol_conf.cumulate_callchain)
memset(he->stat_acc, 0, sizeof(he->stat));
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color_cached(&he->rb_node_in, root, leftmost);
hists__inc_stats(hists, he);
he->dummy = true;
}
out:
return he;
}
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
struct rb_root_cached *root,
struct hist_entry *pair)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
struct perf_hpp_fmt *fmt;
bool leftmost = true;
p = &root->rb_root.rb_node;
while (*p != NULL) {
int64_t cmp = 0;
parent = *p;
he = rb_entry(parent, struct hist_entry, rb_node_in);
perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
cmp = fmt->collapse(fmt, he, pair);
if (cmp)
break;
}
if (!cmp)
goto out;
if (cmp < 0)
p = &parent->rb_left;
else {
p = &parent->rb_right;
leftmost = false;
}
}
he = hist_entry__new(pair, true);
if (he) {
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color_cached(&he->rb_node_in, root, leftmost);
he->dummy = true;
he->hists = hists;
memset(&he->stat, 0, sizeof(he->stat));
hists__inc_stats(hists, he);
}
out:
return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
struct hist_entry *he)
{
struct rb_node *n;
if (hists__has(hists, need_collapse))
n = hists->entries_collapsed.rb_root.rb_node;
else
n = hists->entries_in->rb_root.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
int64_t cmp = hist_entry__collapse(iter, he);
if (cmp < 0)
n = n->rb_left;
else if (cmp > 0)
n = n->rb_right;
else
return iter;
}
return NULL;
}
static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
struct rb_node *n = root->rb_root.rb_node;
while (n) {
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
int64_t cmp = 0;
iter = rb_entry(n, struct hist_entry, rb_node_in);
perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
cmp = fmt->collapse(fmt, iter, he);
if (cmp)
break;
}
if (cmp < 0)
n = n->rb_left;
else if (cmp > 0)
n = n->rb_right;
else
return iter;
}
return NULL;
}
static void hists__match_hierarchy(struct rb_root_cached *leader_root,
struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *pair;
for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_hierarchy_entry(other_root, pos);
if (pair) {
hist_entry__add_pair(pair, pos);
hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
}
}
}
/*
* Look for pairs to link to the leader buckets (hist_entries):
*/
void hists__match(struct hists *leader, struct hists *other)
{
struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
if (symbol_conf.report_hierarchy) {
/* hierarchy report always collapses entries */
return hists__match_hierarchy(&leader->entries_collapsed,
&other->entries_collapsed);
}
if (hists__has(leader, need_collapse))
root = &leader->entries_collapsed;
else
root = leader->entries_in;
for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_entry(other, pos);
if (pair)
hist_entry__add_pair(pair, pos);
}
}
static int hists__link_hierarchy(struct hists *leader_hists,
struct hist_entry *parent,
struct rb_root_cached *leader_root,
struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *leader;
for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (hist_entry__has_pairs(pos)) {
bool found = false;
list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
if (leader->hists == leader_hists) {
found = true;
break;
}
}
if (!found)
return -1;
} else {
leader = add_dummy_hierarchy_entry(leader_hists,
leader_root, pos);
if (leader == NULL)
return -1;
/* the parent is the leader-side entry, not pos's own parent */
leader->parent_he = parent;
hist_entry__add_pair(pos, leader);
}
if (!pos->leaf) {
if (hists__link_hierarchy(leader_hists, leader,
&leader->hroot_in,
&pos->hroot_in) < 0)
return -1;
}
}
return 0;
}
/*
* Look for entries in the other hists that are not present in the leader, if
* we find them, just add a dummy entry on the leader hists, with period=0,
* nr_events=0, to serve as the list header.
*/
int hists__link(struct hists *leader, struct hists *other)
{
struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
if (symbol_conf.report_hierarchy) {
/* hierarchy report always collapses entries */
return hists__link_hierarchy(leader, NULL,
&leader->entries_collapsed,
&other->entries_collapsed);
}
if (hists__has(other, need_collapse))
root = &other->entries_collapsed;
else
root = other->entries_in;
for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (!hist_entry__has_pairs(pos)) {
pair = hists__add_dummy_entry(leader, pos);
if (pair == NULL)
return -1;
hist_entry__add_pair(pos, pair);
}
}
return 0;
}
int hists__unlink(struct hists *hists)
{
struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos;
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
list_del_init(&pos->pairs.node);
}
return 0;
}
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
struct perf_sample *sample, bool nonany_branch_mode,
u64 *total_cycles)
{
struct branch_info *bi;
struct branch_entry *entries = perf_sample__branch_entries(sample);
/* If we have branch cycles always annotate them. */
if (bs && bs->nr && entries[0].flags.cycles) {
int i;
bi = sample__resolve_bstack(sample, al);
if (bi) {
struct addr_map_symbol *prev = NULL;
/*
* Ignore errors, still want to process the
* other entries.
*
* For non-standard branch modes, always
* force no IPC (prev == NULL).
*
* Note that perf stores branches reversed from
* program order!
*/
for (i = bs->nr - 1; i >= 0; i--) {
addr_map_symbol__account_cycles(&bi[i].from,
nonany_branch_mode ? NULL : prev,
bi[i].flags.cycles);
prev = &bi[i].to;
if (total_cycles)
*total_cycles += bi[i].flags.cycles;
}
free(bi);
}
}
}
size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
bool skip_empty)
{
struct evsel *pos;
size_t ret = 0;
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
continue;
ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
if (hists->stats.nr_samples)
ret += fprintf(fp, "%16s events: %10d\n",
"SAMPLE", hists->stats.nr_samples);
if (hists->stats.nr_lost_samples)
ret += fprintf(fp, "%16s events: %10d\n",
"LOST_SAMPLES", hists->stats.nr_lost_samples);
}
return ret;
}
u64 hists__total_period(struct hists *hists)
{
return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
hists->stats.total_period;
}
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
char unit;
int printed;
const struct dso *dso = hists->dso_filter;
struct thread *thread = hists->thread_filter;
int socket_id = hists->socket_filter;
unsigned long nr_samples = hists->stats.nr_samples;
u64 nr_events = hists->stats.total_period;
struct evsel *evsel = hists_to_evsel(hists);
const char *ev_name = evsel__name(evsel);
char buf[512], sample_freq_str[64] = "";
size_t buflen = sizeof(buf);
char ref[30] = " show reference callgraph, ";
bool enable_ref = false;
if (symbol_conf.filter_relative) {
nr_samples = hists->stats.nr_non_filtered_samples;
nr_events = hists->stats.total_non_filtered_period;
}
if (evsel__is_group_event(evsel)) {
struct evsel *pos;
evsel__group_desc(evsel, buf, buflen);
ev_name = buf;
for_each_group_member(pos, evsel) {
struct hists *pos_hists = evsel__hists(pos);
if (symbol_conf.filter_relative) {
nr_samples += pos_hists->stats.nr_non_filtered_samples;
nr_events += pos_hists->stats.total_non_filtered_period;
} else {
nr_samples += pos_hists->stats.nr_samples;
nr_events += pos_hists->stats.total_period;
}
}
}
if (symbol_conf.show_ref_callgraph &&
strstr(ev_name, "call-graph=no"))
enable_ref = true;
if (show_freq)
scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
nr_samples = convert_unit(nr_samples, &unit);
printed = scnprintf(bf, size,
"Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
if (hists->uid_filter_str)
printed += snprintf(bf + printed, size - printed,
", UID: %s", hists->uid_filter_str);
if (thread) {
if (hists__has(hists, thread)) {
printed += scnprintf(bf + printed, size - printed,
", Thread: %s(%d)",
(thread__comm_set(thread) ? thread__comm_str(thread) : ""),
thread__tid(thread));
} else {
printed += scnprintf(bf + printed, size - printed,
", Thread: %s",
(thread__comm_set(thread) ? thread__comm_str(thread) : ""));
}
}
if (dso)
printed += scnprintf(bf + printed, size - printed,
", DSO: %s", dso->short_name);
if (socket_id > -1)
printed += scnprintf(bf + printed, size - printed,
", Processor Socket: %d", socket_id);
return printed;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
{
if (!strcmp(arg, "relative"))
symbol_conf.filter_relative = true;
else if (!strcmp(arg, "absolute"))
symbol_conf.filter_relative = false;
else {
pr_debug("Invalid percentage: %s\n", arg);
return -1;
}
return 0;
}
int perf_hist_config(const char *var, const char *value)
{
if (!strcmp(var, "hist.percentage"))
return parse_filter_percentage(NULL, value, 0);
return 0;
}
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
memset(hists, 0, sizeof(*hists));
hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
hists->entries_in = &hists->entries_in_array[0];
hists->entries_collapsed = RB_ROOT_CACHED;
hists->entries = RB_ROOT_CACHED;
mutex_init(&hists->lock);
hists->socket_filter = -1;
hists->hpp_list = hpp_list;
INIT_LIST_HEAD(&hists->hpp_formats);
return 0;
}
static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
struct rb_node *node;
struct hist_entry *he;
while (!RB_EMPTY_ROOT(&root->rb_root)) {
node = rb_first_cached(root);
rb_erase_cached(node, root);
he = rb_entry(node, struct hist_entry, rb_node_in);
hist_entry__delete(he);
}
}
static void hists__delete_all_entries(struct hists *hists)
{
hists__delete_entries(hists);
hists__delete_remaining_entries(&hists->entries_in_array[0]);
hists__delete_remaining_entries(&hists->entries_in_array[1]);
hists__delete_remaining_entries(&hists->entries_collapsed);
}
static void hists_evsel__exit(struct evsel *evsel)
{
struct hists *hists = evsel__hists(evsel);
struct perf_hpp_fmt *fmt, *pos;
struct perf_hpp_list_node *node, *tmp;
hists__delete_all_entries(hists);
list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
list_del_init(&fmt->list);
free(fmt);
}
list_del_init(&node->list);
free(node);
}
}
static int hists_evsel__init(struct evsel *evsel)
{
struct hists *hists = evsel__hists(evsel);
__hists__init(hists, &perf_hpp_list);
return 0;
}
/*
* XXX We probably need a hists_evsel__exit() to free the hist_entries
* stored in the rbtree...
*/
int hists__init(void)
{
int err = evsel__object_config(sizeof(struct hists_evsel),
hists_evsel__init, hists_evsel__exit);
if (err)
fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
return err;
}
void perf_hpp_list__init(struct perf_hpp_list *list)
{
INIT_LIST_HEAD(&list->fields);
INIT_LIST_HEAD(&list->sorts);
}
| linux-master | tools/perf/util/hist.c |
// SPDX-License-Identifier: GPL-2.0
/*
* build-id.c
*
* build-id support
*
* Copyright (C) 2009, 2010 Red Hat Inc.
* Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <[email protected]>
*/
#include "util.h" // lsdir(), mkdir_p(), rm_rf()
#include <dirent.h>
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "util/copyfile.h"
#include "dso.h"
#include "build-id.h"
#include "event.h"
#include "namespaces.h"
#include "map.h"
#include "symbol.h"
#include "thread.h"
#include <linux/kernel.h>
#include "debug.h"
#include "session.h"
#include "tool.h"
#include "header.h"
#include "vdso.h"
#include "path.h"
#include "probe-file.h"
#include "strlist.h"
#ifdef HAVE_DEBUGINFOD_SUPPORT
#include <elfutils/debuginfod.h>
#endif
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include <linux/string.h>
#include <asm/bug.h>
static bool no_buildid_cache;
int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel __maybe_unused,
struct machine *machine)
{
struct addr_location al;
struct thread *thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
addr_location__init(&al);
if (thread__find_map(thread, sample->cpumode, sample->ip, &al))
map__dso(al.map)->hit = 1;
addr_location__exit(&al);
thread__put(thread);
return 0;
}
static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample
__maybe_unused,
struct machine *machine)
{
struct thread *thread = machine__findnew_thread(machine,
event->fork.pid,
event->fork.tid);
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
if (thread) {
machine__remove_thread(machine, thread);
thread__put(thread);
}
return 0;
}
struct perf_tool build_id__mark_dso_hit_ops = {
.sample = build_id__mark_dso_hit,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
.fork = perf_event__process_fork,
.exit = perf_event__exit_del_thread,
.attr = perf_event__process_attr,
.build_id = perf_event__process_build_id,
.ordered_events = true,
};
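/*
 * Render the raw build-id bytes as a hex string into 'bf'; returns the
 * printed length including the terminating NUL.
 */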
int build_id__sprintf(const struct build_id *build_id, char *bf)
{
char *bid = bf;
const u8 *raw = build_id->data;
size_t i;
bf[0] = 0x0;
for (i = 0; i < build_id->size; ++i) {
sprintf(bid, "%02x", *raw);
++raw;
bid += 2;
}
return (bid - bf) + 1;
}
int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id)
{
char notes[PATH_MAX];
struct build_id bid;
int ret;
if (!root_dir)
root_dir = "";
scnprintf(notes, sizeof(notes), "%s/sys/kernel/notes", root_dir);
ret = sysfs__read_build_id(notes, &bid);
if (ret < 0)
return ret;
return build_id__sprintf(&bid, sbuild_id);
}
int filename__sprintf_build_id(const char *pathname, char *sbuild_id)
{
struct build_id bid;
int ret;
ret = filename__read_build_id(pathname, &bid);
if (ret < 0)
return ret;
return build_id__sprintf(&bid, sbuild_id);
}
/* asnprintf consolidates asprintf and snprintf */
static int asnprintf(char **strp, size_t size, const char *fmt, ...)
{
va_list ap;
int ret;
if (!strp)
return -EINVAL;
va_start(ap, fmt);
if (*strp)
ret = vsnprintf(*strp, size, fmt, ap);
else
ret = vasprintf(strp, fmt, ap);
va_end(ap);
return ret;
}
char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
size_t size)
{
bool retry_old = true;
snprintf(bf, size, "%s/%s/%s/kallsyms",
buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
retry:
if (!access(bf, F_OK))
return bf;
if (retry_old) {
/* Try old style kallsyms cache */
snprintf(bf, size, "%s/%s/%s",
buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
retry_old = false;
goto retry;
}
return NULL;
}
char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
{
char *tmp = bf;
int ret = asnprintf(&bf, size, "%s/.build-id/%.2s/%s", buildid_dir,
sbuild_id, sbuild_id + 2);
if (ret < 0 || (tmp && size < (unsigned int)ret))
return NULL;
return bf;
}
/* The caller is responsible to free the returned buffer. */
char *build_id_cache__origname(const char *sbuild_id)
{
char *linkname;
char buf[PATH_MAX];
char *ret = NULL, *p;
size_t offs = 5; /* == strlen("../..") */
ssize_t len;
linkname = build_id_cache__linkname(sbuild_id, NULL, 0);
if (!linkname)
return NULL;
len = readlink(linkname, buf, sizeof(buf) - 1);
if (len <= 0)
goto out;
buf[len] = '\0';
/* The link should be "../..<origpath>/<sbuild_id>" */
p = strrchr(buf, '/'); /* Cut off the "/<sbuild_id>" */
if (p && (p > buf + offs)) {
*p = '\0';
if (buf[offs + 1] == '[')
offs++; /*
* This is a DSO name, like [kernel.kallsyms].
* Skip the first '/', since this is not the
* cache of a regular file.
*/
ret = strdup(buf + offs); /* Skip "../..[/]" */
}
out:
free(linkname);
return ret;
}
/* Check if the given build_id cache is valid on current running system */
static bool build_id_cache__valid_id(char *sbuild_id)
{
char real_sbuild_id[SBUILD_ID_SIZE] = "";
char *pathname;
int ret = 0;
bool result = false;
pathname = build_id_cache__origname(sbuild_id);
if (!pathname)
return false;
if (!strcmp(pathname, DSO__NAME_KALLSYMS))
ret = sysfs__sprintf_build_id("/", real_sbuild_id);
else if (pathname[0] == '/')
ret = filename__sprintf_build_id(pathname, real_sbuild_id);
else
ret = -EINVAL; /* Should we support other special DSO cache? */
if (ret >= 0)
result = (strcmp(sbuild_id, real_sbuild_id) == 0);
free(pathname);
return result;
}
static const char *build_id_cache__basename(bool is_kallsyms, bool is_vdso,
bool is_debug)
{
return is_kallsyms ? "kallsyms" : (is_vdso ? "vdso" : (is_debug ?
"debug" : "elf"));
}
char *__dso__build_id_filename(const struct dso *dso, char *bf, size_t size,
bool is_debug, bool is_kallsyms)
{
bool is_vdso = dso__is_vdso((struct dso *)dso);
char sbuild_id[SBUILD_ID_SIZE];
char *linkname;
bool alloc = (bf == NULL);
int ret;
if (!dso->has_build_id)
return NULL;
build_id__sprintf(&dso->bid, sbuild_id);
linkname = build_id_cache__linkname(sbuild_id, NULL, 0);
if (!linkname)
return NULL;
/* Check if old style build_id cache */
if (is_regular_file(linkname))
ret = asnprintf(&bf, size, "%s", linkname);
else
ret = asnprintf(&bf, size, "%s/%s", linkname,
build_id_cache__basename(is_kallsyms, is_vdso,
is_debug));
if (ret < 0 || (!alloc && size < (unsigned int)ret))
bf = NULL;
free(linkname);
return bf;
}
char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size,
bool is_debug)
{
bool is_kallsyms = dso__is_kallsyms((struct dso *)dso);
return __dso__build_id_filename(dso, bf, size, is_debug, is_kallsyms);
}
static int write_buildid(const char *name, size_t name_len, struct build_id *bid,
pid_t pid, u16 misc, struct feat_fd *fd)
{
int err;
struct perf_record_header_build_id b;
size_t len;
len = name_len + 1;
len = PERF_ALIGN(len, NAME_ALIGN);
memset(&b, 0, sizeof(b));
memcpy(&b.data, bid->data, bid->size);
b.size = (u8) bid->size;
misc |= PERF_RECORD_MISC_BUILD_ID_SIZE;
b.pid = pid;
b.header.misc = misc;
b.header.size = sizeof(b) + len;
err = do_write(fd, &b, sizeof(b));
if (err < 0)
return err;
return write_padded(fd, name, name_len + 1, len);
}
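/*
 * Emit one build-id table record per hit (or vdso) DSO of the machine,
 * using the guest misc flags for guest machines.
 */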
static int machine__write_buildid_table(struct machine *machine,
struct feat_fd *fd)
{
int err = 0;
struct dso *pos;
u16 kmisc = PERF_RECORD_MISC_KERNEL,
umisc = PERF_RECORD_MISC_USER;
if (!machine__is_host(machine)) {
kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
umisc = PERF_RECORD_MISC_GUEST_USER;
}
dsos__for_each_with_build_id(pos, &machine->dsos.head) {
const char *name;
size_t name_len;
bool in_kernel = false;
if (!pos->hit && !dso__is_vdso(pos))
continue;
if (dso__is_vdso(pos)) {
name = pos->short_name;
name_len = pos->short_name_len;
} else if (dso__is_kcore(pos)) {
name = machine->mmap_name;
name_len = strlen(name);
} else {
name = pos->long_name;
name_len = pos->long_name_len;
}
in_kernel = pos->kernel ||
is_kernel_module(name,
PERF_RECORD_MISC_CPUMODE_UNKNOWN);
err = write_buildid(name, name_len, &pos->bid, machine->pid,
in_kernel ? kmisc : umisc, fd);
if (err)
break;
}
return err;
}
int perf_session__write_buildid_table(struct perf_session *session,
struct feat_fd *fd)
{
struct rb_node *nd;
int err = machine__write_buildid_table(&session->machines.host, fd);
if (err)
return err;
for (nd = rb_first_cached(&session->machines.guests); nd;
nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__write_buildid_table(pos, fd);
if (err)
break;
}
return err;
}
static int __dsos__hit_all(struct list_head *head)
{
struct dso *pos;
list_for_each_entry(pos, head, node)
pos->hit = true;
return 0;
}
static int machine__hit_all_dsos(struct machine *machine)
{
return __dsos__hit_all(&machine->dsos.head);
}
int dsos__hit_all(struct perf_session *session)
{
struct rb_node *nd;
int err;
err = machine__hit_all_dsos(&session->machines.host);
if (err)
return err;
for (nd = rb_first_cached(&session->machines.guests); nd;
nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__hit_all_dsos(pos);
if (err)
return err;
}
return 0;
}
void disable_buildid_cache(void)
{
no_buildid_cache = true;
}
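/*
 * The cache stores build-ids as .build-id/<first two hex chars>/<remaining
 * hex chars>; these lsdir() filters match the two path components.
 */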
static bool lsdir_bid_head_filter(const char *name __maybe_unused,
struct dirent *d)
{
return (strlen(d->d_name) == 2) &&
isxdigit(d->d_name[0]) && isxdigit(d->d_name[1]);
}
static bool lsdir_bid_tail_filter(const char *name __maybe_unused,
struct dirent *d)
{
int i = 0;
while (isxdigit(d->d_name[i]) && i < SBUILD_ID_SIZE - 3)
i++;
return (i >= SBUILD_ID_MIN_SIZE - 3) && (i <= SBUILD_ID_SIZE - 3) &&
(d->d_name[i] == '\0');
}
struct strlist *build_id_cache__list_all(bool validonly)
{
struct strlist *toplist, *linklist = NULL, *bidlist;
struct str_node *nd, *nd2;
char *topdir, *linkdir = NULL;
char sbuild_id[SBUILD_ID_SIZE];
/* for filename__ functions */
if (validonly)
symbol__init(NULL);
/* Open the top-level directory */
if (asprintf(&topdir, "%s/.build-id/", buildid_dir) < 0)
return NULL;
bidlist = strlist__new(NULL, NULL);
if (!bidlist)
goto out;
toplist = lsdir(topdir, lsdir_bid_head_filter);
if (!toplist) {
pr_debug("Error in lsdir(%s): %d\n", topdir, errno);
/* If there is no buildid cache, return an empty list */
if (errno == ENOENT)
goto out;
goto err_out;
}
strlist__for_each_entry(nd, toplist) {
if (asprintf(&linkdir, "%s/%s", topdir, nd->s) < 0)
goto err_out;
/* Open the lower-level directory */
linklist = lsdir(linkdir, lsdir_bid_tail_filter);
if (!linklist) {
pr_debug("Error in lsdir(%s): %d\n", linkdir, errno);
goto err_out;
}
strlist__for_each_entry(nd2, linklist) {
if (snprintf(sbuild_id, SBUILD_ID_SIZE, "%s%s",
nd->s, nd2->s) > SBUILD_ID_SIZE - 1)
goto err_out;
if (validonly && !build_id_cache__valid_id(sbuild_id))
continue;
if (strlist__add(bidlist, sbuild_id) < 0)
goto err_out;
}
strlist__delete(linklist);
zfree(&linkdir);
}
out_free:
strlist__delete(toplist);
out:
free(topdir);
return bidlist;
err_out:
strlist__delete(linklist);
zfree(&linkdir);
strlist__delete(bidlist);
bidlist = NULL;
goto out_free;
}
static bool str_is_build_id(const char *maybe_sbuild_id, size_t len)
{
size_t i;
for (i = 0; i < len; i++) {
if (!isxdigit(maybe_sbuild_id[i]))
return false;
}
return true;
}
/* Return the valid complete build-id */
char *build_id_cache__complement(const char *incomplete_sbuild_id)
{
struct strlist *bidlist;
struct str_node *nd, *cand = NULL;
char *sbuild_id = NULL;
size_t len = strlen(incomplete_sbuild_id);
if (len >= SBUILD_ID_SIZE ||
!str_is_build_id(incomplete_sbuild_id, len))
return NULL;
bidlist = build_id_cache__list_all(true);
if (!bidlist)
return NULL;
strlist__for_each_entry(nd, bidlist) {
if (strncmp(nd->s, incomplete_sbuild_id, len) != 0)
continue;
if (cand) { /* Error: There are more than 2 candidates. */
cand = NULL;
break;
}
cand = nd;
}
if (cand)
sbuild_id = strdup(cand->s);
strlist__delete(bidlist);
return sbuild_id;
}
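/*
 * Compose the cache directory <buildid_dir>/<resolved name>[/<sbuild_id>]
 * for a binary; kallsyms and vdso entries use their special DSO names
 * instead of a resolved path.
 */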
char *build_id_cache__cachedir(const char *sbuild_id, const char *name,
struct nsinfo *nsi, bool is_kallsyms,
bool is_vdso)
{
char *realname = NULL, *filename;
bool slash = is_kallsyms || is_vdso;
if (!slash)
realname = nsinfo__realpath(name, nsi);
if (asprintf(&filename, "%s%s%s%s%s", buildid_dir, slash ? "/" : "",
is_vdso ? DSO__NAME_VDSO : (realname ? realname : name),
sbuild_id ? "/" : "", sbuild_id ?: "") < 0)
filename = NULL;
free(realname);
return filename;
}
int build_id_cache__list_build_ids(const char *pathname, struct nsinfo *nsi,
struct strlist **result)
{
char *dir_name;
int ret = 0;
dir_name = build_id_cache__cachedir(NULL, pathname, nsi, false, false);
if (!dir_name)
return -ENOMEM;
*result = lsdir(dir_name, lsdir_no_dot_filter);
if (!*result)
ret = -errno;
free(dir_name);
return ret;
}
#if defined(HAVE_LIBELF_SUPPORT) && defined(HAVE_GELF_GETNOTE_SUPPORT)
static int build_id_cache__add_sdt_cache(const char *sbuild_id,
const char *realname,
struct nsinfo *nsi)
{
struct probe_cache *cache;
int ret;
struct nscookie nsc;
cache = probe_cache__new(sbuild_id, nsi);
if (!cache)
return -1;
nsinfo__mountns_enter(nsi, &nsc);
ret = probe_cache__scan_sdt(cache, realname);
nsinfo__mountns_exit(&nsc);
if (ret >= 0) {
pr_debug4("Found %d SDTs in %s\n", ret, realname);
if (probe_cache__commit(cache) < 0)
ret = -1;
}
probe_cache__delete(cache);
return ret;
}
#else
#define build_id_cache__add_sdt_cache(sbuild_id, realname, nsi) (0)
#endif
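/*
 * Look for a matching .debug file under /usr/lib/debug/.build-id/ inside
 * the target mount namespace, falling back to debuginfod when support is
 * compiled in.
 */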
static char *build_id_cache__find_debug(const char *sbuild_id,
struct nsinfo *nsi,
const char *root_dir)
{
const char *dirname = "/usr/lib/debug/.build-id/";
char *realname = NULL;
char dirbuf[PATH_MAX];
char *debugfile;
struct nscookie nsc;
size_t len = 0;
debugfile = calloc(1, PATH_MAX);
if (!debugfile)
goto out;
if (root_dir) {
path__join(dirbuf, PATH_MAX, root_dir, dirname);
dirname = dirbuf;
}
len = __symbol__join_symfs(debugfile, PATH_MAX, dirname);
snprintf(debugfile + len, PATH_MAX - len, "%.2s/%s.debug", sbuild_id,
sbuild_id + 2);
nsinfo__mountns_enter(nsi, &nsc);
realname = realpath(debugfile, NULL);
if (realname && access(realname, R_OK))
zfree(&realname);
nsinfo__mountns_exit(&nsc);
#ifdef HAVE_DEBUGINFOD_SUPPORT
if (realname == NULL) {
debuginfod_client* c;
pr_debug("Downloading debug info with build id %s\n", sbuild_id);
c = debuginfod_begin();
if (c != NULL) {
int fd = debuginfod_find_debuginfo(c,
(const unsigned char*)sbuild_id, 0,
&realname);
if (fd >= 0)
close(fd); /* retaining reference by realname */
debuginfod_end(c);
}
}
#endif
out:
free(debugfile);
return realname;
}
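/*
 * Populate the cache directory for one build-id: copy or hard-link the
 * binary (plus a separate .debug file when one can be found), then create
 * the .build-id/<xx>/<rest> symlink pointing back at it.
 */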
int
build_id_cache__add(const char *sbuild_id, const char *name, const char *realname,
struct nsinfo *nsi, bool is_kallsyms, bool is_vdso,
const char *proper_name, const char *root_dir)
{
const size_t size = PATH_MAX;
char *filename = NULL, *dir_name = NULL, *linkname = zalloc(size), *tmp;
char *debugfile = NULL;
int err = -1;
if (!proper_name)
proper_name = name;
dir_name = build_id_cache__cachedir(sbuild_id, proper_name, nsi, is_kallsyms,
is_vdso);
if (!dir_name)
goto out_free;
/* Remove old style build-id cache */
if (is_regular_file(dir_name))
if (unlink(dir_name))
goto out_free;
if (mkdir_p(dir_name, 0755))
goto out_free;
/* Save the allocated buildid dirname */
if (asprintf(&filename, "%s/%s", dir_name,
build_id_cache__basename(is_kallsyms, is_vdso,
false)) < 0) {
filename = NULL;
goto out_free;
}
if (access(filename, F_OK)) {
if (is_kallsyms) {
if (copyfile("/proc/kallsyms", filename))
goto out_free;
} else if (nsi && nsinfo__need_setns(nsi)) {
if (copyfile_ns(name, filename, nsi))
goto out_free;
} else if (link(realname, filename) && errno != EEXIST) {
struct stat f_stat;
if (!(stat(name, &f_stat) < 0) &&
copyfile_mode(name, filename, f_stat.st_mode))
goto out_free;
}
}
/* Some binaries are stripped, but have .debug files with their symbol
* table. Check to see if we can locate one of those, since the elf
* file itself may not be very useful to users of our tools without a
* symtab.
*/
if (!is_kallsyms && !is_vdso &&
strncmp(".ko", name + strlen(name) - 3, 3)) {
debugfile = build_id_cache__find_debug(sbuild_id, nsi, root_dir);
if (debugfile) {
zfree(&filename);
if (asprintf(&filename, "%s/%s", dir_name,
build_id_cache__basename(false, false, true)) < 0) {
filename = NULL;
goto out_free;
}
if (access(filename, F_OK)) {
if (nsi && nsinfo__need_setns(nsi)) {
if (copyfile_ns(debugfile, filename,
nsi))
goto out_free;
} else if (link(debugfile, filename) &&
errno != EEXIST &&
copyfile(debugfile, filename))
goto out_free;
}
}
}
if (!build_id_cache__linkname(sbuild_id, linkname, size))
goto out_free;
tmp = strrchr(linkname, '/');
*tmp = '\0';
if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
goto out_free;
*tmp = '/';
tmp = dir_name + strlen(buildid_dir) - 5;
memcpy(tmp, "../..", 5);
if (symlink(tmp, linkname) == 0) {
err = 0;
} else if (errno == EEXIST) {
char path[PATH_MAX];
ssize_t len;
len = readlink(linkname, path, sizeof(path) - 1);
if (len <= 0) {
pr_err("Can't read link: %s\n", linkname);
goto out_free;
}
path[len] = '\0';
if (strcmp(tmp, path)) {
pr_debug("build <%s> already linked to %s\n",
sbuild_id, linkname);
}
err = 0;
}
/* Update SDT cache : error is just warned */
if (realname &&
build_id_cache__add_sdt_cache(sbuild_id, realname, nsi) < 0)
pr_debug4("Failed to update/scan SDT cache for %s\n", realname);
out_free:
free(filename);
free(debugfile);
free(dir_name);
free(linkname);
return err;
}
int __build_id_cache__add_s(const char *sbuild_id, const char *name,
struct nsinfo *nsi, bool is_kallsyms, bool is_vdso,
const char *proper_name, const char *root_dir)
{
char *realname = NULL;
int err = -1;
if (!is_kallsyms) {
if (!is_vdso)
realname = nsinfo__realpath(name, nsi);
else
realname = realpath(name, NULL);
if (!realname)
goto out_free;
}
err = build_id_cache__add(sbuild_id, name, realname, nsi,
is_kallsyms, is_vdso, proper_name, root_dir);
out_free:
if (!is_kallsyms)
free(realname);
return err;
}
static int build_id_cache__add_b(const struct build_id *bid,
const char *name, struct nsinfo *nsi,
bool is_kallsyms, bool is_vdso,
const char *proper_name,
const char *root_dir)
{
char sbuild_id[SBUILD_ID_SIZE];
build_id__sprintf(bid, sbuild_id);
return __build_id_cache__add_s(sbuild_id, name, nsi, is_kallsyms,
is_vdso, proper_name, root_dir);
}
bool build_id_cache__cached(const char *sbuild_id)
{
bool ret = false;
char *filename = build_id_cache__linkname(sbuild_id, NULL, 0);
if (filename && !access(filename, F_OK))
ret = true;
free(filename);
return ret;
}
int build_id_cache__remove_s(const char *sbuild_id)
{
const size_t size = PATH_MAX;
char *filename = zalloc(size),
*linkname = zalloc(size), *tmp;
int err = -1;
if (filename == NULL || linkname == NULL)
goto out_free;
if (!build_id_cache__linkname(sbuild_id, linkname, size))
goto out_free;
if (access(linkname, F_OK))
goto out_free;
if (readlink(linkname, filename, size - 1) < 0)
goto out_free;
if (unlink(linkname))
goto out_free;
/*
* Since the link is relative, we must make it absolute:
*/
tmp = strrchr(linkname, '/') + 1;
snprintf(tmp, size - (tmp - linkname), "%s", filename);
if (rm_rf(linkname))
goto out_free;
err = 0;
out_free:
free(filename);
free(linkname);
return err;
}
static int filename__read_build_id_ns(const char *filename,
struct build_id *bid,
struct nsinfo *nsi)
{
struct nscookie nsc;
int ret;
nsinfo__mountns_enter(nsi, &nsc);
ret = filename__read_build_id(filename, bid);
nsinfo__mountns_exit(&nsc);
return ret;
}
static bool dso__build_id_mismatch(struct dso *dso, const char *name)
{
struct build_id bid;
bool ret = false;
mutex_lock(&dso->lock);
if (filename__read_build_id_ns(name, &bid, dso->nsinfo) >= 0)
ret = !dso__build_id_equal(dso, &bid);
mutex_unlock(&dso->lock);
return ret;
}
static int dso__cache_build_id(struct dso *dso, struct machine *machine,
void *priv __maybe_unused)
{
bool is_kallsyms = dso__is_kallsyms(dso);
bool is_vdso = dso__is_vdso(dso);
const char *name = dso->long_name;
const char *proper_name = NULL;
const char *root_dir = NULL;
char *allocated_name = NULL;
int ret = 0;
if (!dso->has_build_id)
return 0;
if (dso__is_kcore(dso)) {
is_kallsyms = true;
name = machine->mmap_name;
}
if (!machine__is_host(machine)) {
if (*machine->root_dir) {
root_dir = machine->root_dir;
ret = asprintf(&allocated_name, "%s/%s", root_dir, name);
if (ret < 0)
return ret;
proper_name = name;
name = allocated_name;
} else if (is_kallsyms) {
/* Cannot get guest kallsyms */
return 0;
}
}
if (!is_kallsyms && dso__build_id_mismatch(dso, name))
goto out_free;
mutex_lock(&dso->lock);
ret = build_id_cache__add_b(&dso->bid, name, dso->nsinfo,
is_kallsyms, is_vdso, proper_name, root_dir);
mutex_unlock(&dso->lock);
out_free:
free(allocated_name);
return ret;
}
static int
machines__for_each_dso(struct machines *machines, machine__dso_t fn, void *priv)
{
int ret = machine__for_each_dso(&machines->host, fn, priv);
struct rb_node *nd;
for (nd = rb_first_cached(&machines->guests); nd;
nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__for_each_dso(pos, fn, priv);
}
return ret ? -1 : 0;
}
int __perf_session__cache_build_ids(struct perf_session *session,
machine__dso_t fn, void *priv)
{
if (no_buildid_cache)
return 0;
if (mkdir(buildid_dir, 0755) != 0 && errno != EEXIST)
return -1;
return machines__for_each_dso(&session->machines, fn, priv) ? -1 : 0;
}
int perf_session__cache_build_ids(struct perf_session *session)
{
return __perf_session__cache_build_ids(session, dso__cache_build_id, NULL);
}
static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
return __dsos__read_build_ids(&machine->dsos.head, with_hits);
}
bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
struct rb_node *nd;
bool ret = machine__read_build_ids(&session->machines.host, with_hits);
for (nd = rb_first_cached(&session->machines.guests); nd;
nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__read_build_ids(pos, with_hits);
}
return ret;
}
void build_id__init(struct build_id *bid, const u8 *data, size_t size)
{
WARN_ON(size > BUILD_ID_SIZE);
memcpy(bid->data, data, size);
bid->size = size;
}
bool build_id__is_defined(const struct build_id *bid)
{
return bid && bid->size ? !!memchr_inv(bid->data, 0, bid->size) : false;
}
| linux-master | tools/perf/util/build-id.c |
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/param.h>
#include <unistd.h>
#include <api/fs/tracing_path.h>
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include "build-id.h"
#include "debug.h"
#include "evsel.h"
#include "metricgroup.h"
#include "parse-events.h"
#include "pmu.h"
#include "pmus.h"
#include "print-events.h"
#include "probe-file.h"
#include "string2.h"
#include "strlist.h"
#include "tracepoint.h"
#include "pfm.h"
#include "thread_map.h"
#define MAX_NAME_LEN 100
/** Strings corresponding to enum perf_type_id. */
static const char * const event_type_descriptors[] = {
"Hardware event",
"Software event",
"Tracepoint event",
"Hardware cache event",
"Raw hardware event descriptor",
"Hardware breakpoint",
};
static const struct event_symbol event_symbols_tool[PERF_TOOL_MAX] = {
[PERF_TOOL_DURATION_TIME] = {
.symbol = "duration_time",
.alias = "",
},
[PERF_TOOL_USER_TIME] = {
.symbol = "user_time",
.alias = "",
},
[PERF_TOOL_SYSTEM_TIME] = {
.symbol = "system_time",
.alias = "",
},
};
/*
* Print the events from <debugfs_mount_point>/tracing/events
*/
void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unused, void *print_state __maybe_unused)
{
char *events_path = get_tracing_file("events");
int events_fd = open(events_path, O_PATH);
put_tracing_file(events_path);
if (events_fd < 0) {
printf("Error: failed to open tracing events directory\n");
return;
}
#ifdef HAVE_SCANDIRAT_SUPPORT
{
struct dirent **sys_namelist = NULL;
int sys_items = tracing_events__scandir_alphasort(&sys_namelist);
for (int i = 0; i < sys_items; i++) {
struct dirent *sys_dirent = sys_namelist[i];
struct dirent **evt_namelist = NULL;
int dir_fd;
int evt_items;
if (sys_dirent->d_type != DT_DIR ||
!strcmp(sys_dirent->d_name, ".") ||
!strcmp(sys_dirent->d_name, ".."))
goto next_sys;
dir_fd = openat(events_fd, sys_dirent->d_name, O_PATH);
if (dir_fd < 0)
goto next_sys;
evt_items = scandirat(events_fd, sys_dirent->d_name, &evt_namelist, NULL, alphasort);
for (int j = 0; j < evt_items; j++) {
struct dirent *evt_dirent = evt_namelist[j];
char evt_path[MAXPATHLEN];
int evt_fd;
if (evt_dirent->d_type != DT_DIR ||
!strcmp(evt_dirent->d_name, ".") ||
!strcmp(evt_dirent->d_name, ".."))
goto next_evt;
snprintf(evt_path, sizeof(evt_path), "%s/id", evt_dirent->d_name);
evt_fd = openat(dir_fd, evt_path, O_RDONLY);
if (evt_fd < 0)
goto next_evt;
close(evt_fd);
snprintf(evt_path, MAXPATHLEN, "%s:%s",
sys_dirent->d_name, evt_dirent->d_name);
print_cb->print_event(print_state,
/*topic=*/NULL,
/*pmu_name=*/NULL,
evt_path,
/*event_alias=*/NULL,
/*scale_unit=*/NULL,
/*deprecated=*/false,
"Tracepoint event",
/*desc=*/NULL,
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
next_evt:
free(evt_namelist[j]);
}
close(dir_fd);
free(evt_namelist);
next_sys:
free(sys_namelist[i]);
}
free(sys_namelist);
}
#else
printf("\nWARNING: Your libc doesn't have the scandirat function, please ask its maintainers to implement it.\n"
" As a rough fallback, please do 'ls %s' to see the available tracepoint events.\n", events_path);
#endif
close(events_fd);
}
void print_sdt_events(const struct print_callbacks *print_cb, void *print_state)
{
struct strlist *bidlist, *sdtlist;
struct str_node *bid_nd, *sdt_name, *next_sdt_name;
const char *last_sdt_name = NULL;
/*
* The implicitly sorted sdtlist will hold the tracepoint name followed
* by @<buildid>. If the tracepoint name is unique (determined by
* looking at the adjacent nodes) the @<buildid> is dropped otherwise
* the executable path and buildid are added to the name.
*/
sdtlist = strlist__new(NULL, NULL);
if (!sdtlist) {
pr_debug("Failed to allocate new strlist for SDT\n");
return;
}
bidlist = build_id_cache__list_all(true);
if (!bidlist) {
pr_debug("Failed to get buildids: %d\n", errno);
return;
}
strlist__for_each_entry(bid_nd, bidlist) {
struct probe_cache *pcache;
struct probe_cache_entry *ent;
pcache = probe_cache__new(bid_nd->s, NULL);
if (!pcache)
continue;
list_for_each_entry(ent, &pcache->entries, node) {
char buf[1024];
snprintf(buf, sizeof(buf), "%s:%s@%s",
ent->pev.group, ent->pev.event, bid_nd->s);
strlist__add(sdtlist, buf);
}
probe_cache__delete(pcache);
}
strlist__delete(bidlist);
strlist__for_each_entry(sdt_name, sdtlist) {
bool show_detail = false;
char *bid = strchr(sdt_name->s, '@');
char *evt_name = NULL;
if (bid)
*(bid++) = '\0';
if (last_sdt_name && !strcmp(last_sdt_name, sdt_name->s)) {
show_detail = true;
} else {
next_sdt_name = strlist__next(sdt_name);
if (next_sdt_name) {
char *bid2 = strchr(next_sdt_name->s, '@');
if (bid2)
*bid2 = '\0';
if (strcmp(sdt_name->s, next_sdt_name->s) == 0)
show_detail = true;
if (bid2)
*bid2 = '@';
}
}
last_sdt_name = sdt_name->s;
if (show_detail) {
char *path = build_id_cache__origname(bid);
if (path) {
if (asprintf(&evt_name, "%s@%s(%.12s)", sdt_name->s, path, bid) < 0)
evt_name = NULL;
free(path);
}
}
print_cb->print_event(print_state,
/*topic=*/NULL,
/*pmu_name=*/NULL,
evt_name ?: sdt_name->s,
			/*event_alias=*/NULL,
			/*scale_unit=*/NULL,
			/*deprecated=*/false,
"SDT event",
/*desc=*/NULL,
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
free(evt_name);
}
strlist__delete(sdtlist);
}
bool is_event_supported(u8 type, u64 config)
{
bool ret = true;
int open_return;
struct evsel *evsel;
struct perf_event_attr attr = {
.type = type,
.config = config,
.disabled = 1,
};
struct perf_thread_map *tmap = thread_map__new_by_tid(0);
if (tmap == NULL)
return false;
evsel = evsel__new(&attr);
if (evsel) {
open_return = evsel__open(evsel, NULL, tmap);
ret = open_return >= 0;
if (open_return == -EACCES) {
/*
* This happens if the paranoid value
* /proc/sys/kernel/perf_event_paranoid is set to 2
* Re-run with exclude_kernel set; we don't do that
* by default as some ARM machines do not support it.
*
*/
evsel->core.attr.exclude_kernel = 1;
ret = evsel__open(evsel, NULL, tmap) >= 0;
}
evsel__delete(evsel);
}
perf_thread_map__put(tmap);
return ret;
}
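/*
 * Illustrative usage sketch (not from the original source): callers in this
 * file probe legacy events before printing them, along the lines of
 *
 *	if (is_event_supported(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES))
 *		... print the "cycles" event ...
 *
 * The probe opens the event on the current thread and retries with
 * exclude_kernel set when the first attempt fails with EACCES.
 */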
int print_hwcache_events(const struct print_callbacks *print_cb, void *print_state)
{
struct perf_pmu *pmu = NULL;
const char *event_type_descriptor = event_type_descriptors[PERF_TYPE_HW_CACHE];
/*
* Only print core PMUs, skipping uncore for performance and
	 * PERF_TYPE_SOFTWARE that can succeed in opening legacy cache events.
*/
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
if (pmu->is_uncore || pmu->type == PERF_TYPE_SOFTWARE)
continue;
for (int type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
for (int op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
/* skip invalid cache type */
if (!evsel__is_cache_op_valid(type, op))
continue;
for (int res = 0; res < PERF_COUNT_HW_CACHE_RESULT_MAX; res++) {
char name[64];
char alias_name[128];
__u64 config;
int ret;
__evsel__hw_cache_type_op_res_name(type, op, res,
name, sizeof(name));
ret = parse_events__decode_legacy_cache(name, pmu->type,
&config);
if (ret || !is_event_supported(PERF_TYPE_HW_CACHE, config))
continue;
snprintf(alias_name, sizeof(alias_name), "%s/%s/",
pmu->name, name);
print_cb->print_event(print_state,
"cache",
pmu->name,
name,
alias_name,
/*scale_unit=*/NULL,
/*deprecated=*/false,
event_type_descriptor,
/*desc=*/NULL,
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
}
}
}
}
return 0;
}
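/*
 * Illustrative note (hypothetical names): for the default core PMU the loops
 * above print legacy cache events such as "L1-dcache-load-misses" together
 * with the PMU-qualified alias built by the snprintf() call, e.g.
 * "cpu/L1-dcache-load-misses/".
 */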
void print_tool_events(const struct print_callbacks *print_cb, void *print_state)
{
// Start at 1 because the first enum entry means no tool event.
for (int i = 1; i < PERF_TOOL_MAX; ++i) {
print_cb->print_event(print_state,
"tool",
/*pmu_name=*/NULL,
event_symbols_tool[i].symbol,
event_symbols_tool[i].alias,
/*scale_unit=*/NULL,
/*deprecated=*/false,
"Tool event",
/*desc=*/NULL,
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
}
}
void print_symbol_events(const struct print_callbacks *print_cb, void *print_state,
unsigned int type, const struct event_symbol *syms,
unsigned int max)
{
struct strlist *evt_name_list = strlist__new(NULL, NULL);
struct str_node *nd;
if (!evt_name_list) {
pr_debug("Failed to allocate new strlist for symbol events\n");
return;
}
for (unsigned int i = 0; i < max; i++) {
/*
* New attr.config still not supported here, the latest
* example was PERF_COUNT_SW_CGROUP_SWITCHES
*/
if (syms[i].symbol == NULL)
continue;
if (!is_event_supported(type, i))
continue;
if (strlen(syms[i].alias)) {
char name[MAX_NAME_LEN];
snprintf(name, MAX_NAME_LEN, "%s OR %s", syms[i].symbol, syms[i].alias);
strlist__add(evt_name_list, name);
} else
strlist__add(evt_name_list, syms[i].symbol);
}
strlist__for_each_entry(nd, evt_name_list) {
char *alias = strstr(nd->s, " OR ");
if (alias) {
*alias = '\0';
alias += 4;
}
print_cb->print_event(print_state,
/*topic=*/NULL,
/*pmu_name=*/NULL,
nd->s,
alias,
/*scale_unit=*/NULL,
/*deprecated=*/false,
event_type_descriptors[type],
/*desc=*/NULL,
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
}
strlist__delete(evt_name_list);
}
/*
* Print the help text for the event symbols:
*/
void print_events(const struct print_callbacks *print_cb, void *print_state)
{
print_symbol_events(print_cb, print_state, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX);
print_symbol_events(print_cb, print_state, PERF_TYPE_SOFTWARE,
event_symbols_sw, PERF_COUNT_SW_MAX);
print_tool_events(print_cb, print_state);
print_hwcache_events(print_cb, print_state);
perf_pmus__print_pmu_events(print_cb, print_state);
print_cb->print_event(print_state,
/*topic=*/NULL,
/*pmu_name=*/NULL,
"rNNN",
/*event_alias=*/NULL,
/*scale_unit=*/NULL,
/*deprecated=*/false,
event_type_descriptors[PERF_TYPE_RAW],
/*desc=*/NULL,
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
print_cb->print_event(print_state,
/*topic=*/NULL,
/*pmu_name=*/NULL,
"cpu/t1=v1[,t2=v2,t3 ...]/modifier",
/*event_alias=*/NULL,
/*scale_unit=*/NULL,
/*deprecated=*/false,
event_type_descriptors[PERF_TYPE_RAW],
"(see 'man perf-list' on how to encode it)",
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
print_cb->print_event(print_state,
/*topic=*/NULL,
/*pmu_name=*/NULL,
"mem:<addr>[/len][:access]",
			/*event_alias=*/NULL,
			/*scale_unit=*/NULL,
/*deprecated=*/false,
event_type_descriptors[PERF_TYPE_BREAKPOINT],
/*desc=*/NULL,
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
print_tracepoint_events(print_cb, print_state);
print_sdt_events(print_cb, print_state);
metricgroup__print(print_cb, print_state);
print_libpfm_events(print_cb, print_state);
}
| linux-master | tools/perf/util/print-events.c |
// SPDX-License-Identifier: GPL-2.0
#include "cache.h"
#include "debug.h"
#include "strbuf.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
/*
* Used as the default ->buf value, so that people can always assume
* buf is non NULL and ->buf is NUL terminated even for a freshly
* initialized strbuf.
*/
char strbuf_slopbuf[1];
int strbuf_init(struct strbuf *sb, ssize_t hint)
{
sb->alloc = sb->len = 0;
sb->buf = strbuf_slopbuf;
if (hint)
return strbuf_grow(sb, hint);
return 0;
}
void strbuf_release(struct strbuf *sb)
{
if (sb->alloc) {
zfree(&sb->buf);
strbuf_init(sb, 0);
}
}
char *strbuf_detach(struct strbuf *sb, size_t *sz)
{
char *res = sb->alloc ? sb->buf : NULL;
if (sz)
*sz = sb->len;
strbuf_init(sb, 0);
return res;
}
int strbuf_grow(struct strbuf *sb, size_t extra)
{
char *buf;
size_t nr = sb->len + extra + 1;
if (nr < sb->alloc)
return 0;
if (nr <= sb->len)
return -E2BIG;
if (alloc_nr(sb->alloc) > nr)
nr = alloc_nr(sb->alloc);
/*
* Note that sb->buf == strbuf_slopbuf if sb->alloc == 0, and it is
* a static variable. Thus we have to avoid passing it to realloc.
*/
buf = realloc(sb->alloc ? sb->buf : NULL, nr * sizeof(*buf));
if (!buf)
return -ENOMEM;
sb->buf = buf;
sb->alloc = nr;
return 0;
}
int strbuf_addch(struct strbuf *sb, int c)
{
int ret = strbuf_grow(sb, 1);
if (ret)
return ret;
sb->buf[sb->len++] = c;
sb->buf[sb->len] = '\0';
return 0;
}
int strbuf_add(struct strbuf *sb, const void *data, size_t len)
{
int ret = strbuf_grow(sb, len);
if (ret)
return ret;
memcpy(sb->buf + sb->len, data, len);
return strbuf_setlen(sb, sb->len + len);
}
static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
{
int len, ret;
va_list ap_saved;
if (!strbuf_avail(sb)) {
ret = strbuf_grow(sb, 64);
if (ret)
return ret;
}
va_copy(ap_saved, ap);
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
if (len < 0) {
va_end(ap_saved);
return len;
}
if (len > strbuf_avail(sb)) {
ret = strbuf_grow(sb, len);
if (ret) {
va_end(ap_saved);
return ret;
}
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
if (len > strbuf_avail(sb)) {
pr_debug("this should not happen, your vsnprintf is broken");
va_end(ap_saved);
return -EINVAL;
}
}
va_end(ap_saved);
return strbuf_setlen(sb, sb->len + len);
}
int strbuf_addf(struct strbuf *sb, const char *fmt, ...)
{
va_list ap;
int ret;
va_start(ap, fmt);
ret = strbuf_addv(sb, fmt, ap);
va_end(ap);
return ret;
}
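/*
 * Illustrative usage sketch (not part of the original file), assuming the
 * STRBUF_INIT initializer from strbuf.h; use_string() and the variable
 * names are hypothetical:
 *
 *	struct strbuf sb = STRBUF_INIT;
 *
 *	if (strbuf_addf(&sb, "%s:%d", name, cpu) < 0)
 *		return -ENOMEM;
 *	use_string(sb.buf);
 *	strbuf_release(&sb);
 */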
ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
{
size_t oldlen = sb->len;
size_t oldalloc = sb->alloc;
int ret;
ret = strbuf_grow(sb, hint ? hint : 8192);
if (ret)
return ret;
for (;;) {
ssize_t cnt;
cnt = read(fd, sb->buf + sb->len, sb->alloc - sb->len - 1);
if (cnt < 0) {
if (oldalloc == 0)
strbuf_release(sb);
else
strbuf_setlen(sb, oldlen);
return cnt;
}
if (!cnt)
break;
sb->len += cnt;
ret = strbuf_grow(sb, 8192);
if (ret)
return ret;
}
sb->buf[sb->len] = '\0';
return sb->len - oldlen;
}
| linux-master | tools/perf/util/strbuf.c |
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>
#include "pmus.h"
#include "strbuf.h"
struct perf_env perf_env;
#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include "bpf-utils.h"
#include <bpf/libbpf.h>
void perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node)
{
__u32 prog_id = info_node->info_linear->info.id;
struct bpf_prog_info_node *node;
struct rb_node *parent = NULL;
struct rb_node **p;
down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.infos.rb_node;
while (*p != NULL) {
parent = *p;
node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
if (prog_id < node->info_linear->info.id) {
p = &(*p)->rb_left;
} else if (prog_id > node->info_linear->info.id) {
p = &(*p)->rb_right;
} else {
pr_debug("duplicated bpf prog info %u\n", prog_id);
goto out;
}
}
rb_link_node(&info_node->rb_node, parent, p);
rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
env->bpf_progs.infos_cnt++;
out:
up_write(&env->bpf_progs.lock);
}
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id)
{
struct bpf_prog_info_node *node = NULL;
struct rb_node *n;
down_read(&env->bpf_progs.lock);
n = env->bpf_progs.infos.rb_node;
while (n) {
node = rb_entry(n, struct bpf_prog_info_node, rb_node);
if (prog_id < node->info_linear->info.id)
n = n->rb_left;
else if (prog_id > node->info_linear->info.id)
n = n->rb_right;
else
goto out;
}
node = NULL;
out:
up_read(&env->bpf_progs.lock);
return node;
}
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
struct rb_node *parent = NULL;
__u32 btf_id = btf_node->id;
struct btf_node *node;
struct rb_node **p;
bool ret = true;
down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.btfs.rb_node;
while (*p != NULL) {
parent = *p;
node = rb_entry(parent, struct btf_node, rb_node);
if (btf_id < node->id) {
p = &(*p)->rb_left;
} else if (btf_id > node->id) {
p = &(*p)->rb_right;
} else {
pr_debug("duplicated btf %u\n", btf_id);
ret = false;
goto out;
}
}
rb_link_node(&btf_node->rb_node, parent, p);
rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
env->bpf_progs.btfs_cnt++;
out:
up_write(&env->bpf_progs.lock);
return ret;
}
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
struct btf_node *node = NULL;
struct rb_node *n;
down_read(&env->bpf_progs.lock);
n = env->bpf_progs.btfs.rb_node;
while (n) {
node = rb_entry(n, struct btf_node, rb_node);
if (btf_id < node->id)
n = n->rb_left;
else if (btf_id > node->id)
n = n->rb_right;
else
goto out;
}
node = NULL;
out:
up_read(&env->bpf_progs.lock);
return node;
}
/* purge data in bpf_progs.infos tree */
static void perf_env__purge_bpf(struct perf_env *env)
{
struct rb_root *root;
struct rb_node *next;
down_write(&env->bpf_progs.lock);
root = &env->bpf_progs.infos;
next = rb_first(root);
while (next) {
struct bpf_prog_info_node *node;
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);
rb_erase(&node->rb_node, root);
zfree(&node->info_linear);
free(node);
}
env->bpf_progs.infos_cnt = 0;
root = &env->bpf_progs.btfs;
next = rb_first(root);
while (next) {
struct btf_node *node;
node = rb_entry(next, struct btf_node, rb_node);
next = rb_next(&node->rb_node);
rb_erase(&node->rb_node, root);
free(node);
}
env->bpf_progs.btfs_cnt = 0;
up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT
void perf_env__exit(struct perf_env *env)
{
int i, j;
perf_env__purge_bpf(env);
perf_env__purge_cgroups(env);
zfree(&env->hostname);
zfree(&env->os_release);
zfree(&env->version);
zfree(&env->arch);
zfree(&env->cpu_desc);
zfree(&env->cpuid);
zfree(&env->cmdline);
zfree(&env->cmdline_argv);
zfree(&env->sibling_dies);
zfree(&env->sibling_cores);
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
for (i = 0; i < env->nr_cpu_pmu_caps; i++)
zfree(&env->cpu_pmu_caps[i]);
zfree(&env->cpu_pmu_caps);
zfree(&env->numa_map);
for (i = 0; i < env->nr_numa_nodes; i++)
perf_cpu_map__put(env->numa_nodes[i].map);
zfree(&env->numa_nodes);
for (i = 0; i < env->caches_cnt; i++)
cpu_cache_level__free(&env->caches[i]);
zfree(&env->caches);
for (i = 0; i < env->nr_memory_nodes; i++)
zfree(&env->memory_nodes[i].set);
zfree(&env->memory_nodes);
for (i = 0; i < env->nr_hybrid_nodes; i++) {
zfree(&env->hybrid_nodes[i].pmu_name);
zfree(&env->hybrid_nodes[i].cpus);
}
zfree(&env->hybrid_nodes);
for (i = 0; i < env->nr_pmus_with_caps; i++) {
for (j = 0; j < env->pmu_caps[i].nr_caps; j++)
zfree(&env->pmu_caps[i].caps[j]);
zfree(&env->pmu_caps[i].caps);
zfree(&env->pmu_caps[i].pmu_name);
}
zfree(&env->pmu_caps);
}
void perf_env__init(struct perf_env *env)
{
#ifdef HAVE_LIBBPF_SUPPORT
env->bpf_progs.infos = RB_ROOT;
env->bpf_progs.btfs = RB_ROOT;
init_rwsem(&env->bpf_progs.lock);
#endif
env->kernel_is_64_bit = -1;
}
static void perf_env__init_kernel_mode(struct perf_env *env)
{
const char *arch = perf_env__raw_arch(env);
if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
!strncmp(arch, "arm64", 5) || !strncmp(arch, "mips64", 6) ||
!strncmp(arch, "parisc64", 8) || !strncmp(arch, "riscv64", 7) ||
!strncmp(arch, "s390x", 5) || !strncmp(arch, "sparc64", 7))
env->kernel_is_64_bit = 1;
else
env->kernel_is_64_bit = 0;
}
int perf_env__kernel_is_64_bit(struct perf_env *env)
{
if (env->kernel_is_64_bit == -1)
perf_env__init_kernel_mode(env);
return env->kernel_is_64_bit;
}
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
int i;
/* do not include NULL termination */
env->cmdline_argv = calloc(argc, sizeof(char *));
if (env->cmdline_argv == NULL)
goto out_enomem;
/*
* Must copy argv contents because it gets moved around during option
* parsing:
*/
for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = strdup(argv[i]);
if (env->cmdline_argv[i] == NULL)
goto out_free;
}
env->nr_cmdline = argc;
return 0;
out_free:
zfree(&env->cmdline_argv);
out_enomem:
return -ENOMEM;
}
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
int idx, nr_cpus;
if (env->cpu != NULL)
return 0;
if (env->nr_cpus_avail == 0)
env->nr_cpus_avail = cpu__max_present_cpu().cpu;
nr_cpus = env->nr_cpus_avail;
if (nr_cpus == -1)
return -EINVAL;
env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
if (env->cpu == NULL)
return -ENOMEM;
for (idx = 0; idx < nr_cpus; ++idx) {
struct perf_cpu cpu = { .cpu = idx };
env->cpu[idx].core_id = cpu__get_core_id(cpu);
env->cpu[idx].socket_id = cpu__get_socket_id(cpu);
env->cpu[idx].die_id = cpu__get_die_id(cpu);
}
env->nr_cpus_avail = nr_cpus;
return 0;
}
int perf_env__read_pmu_mappings(struct perf_env *env)
{
struct perf_pmu *pmu = NULL;
u32 pmu_num = 0;
struct strbuf sb;
while ((pmu = perf_pmus__scan(pmu)))
pmu_num++;
if (!pmu_num) {
pr_debug("pmu mappings not available\n");
return -ENOENT;
}
env->nr_pmu_mappings = pmu_num;
if (strbuf_init(&sb, 128 * pmu_num) < 0)
return -ENOMEM;
while ((pmu = perf_pmus__scan(pmu))) {
if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
goto error;
/* include a NULL character at the end */
if (strbuf_add(&sb, "", 1) < 0)
goto error;
}
env->pmu_mappings = strbuf_detach(&sb, NULL);
return 0;
error:
strbuf_release(&sb);
return -1;
}
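/*
 * Illustrative example (not from the original source): the resulting
 * pmu_mappings buffer is a sequence of NUL-terminated "type:name" strings,
 * e.g. "4:cpu\0" followed by "10:breakpoint\0" for two PMUs; the numeric
 * types shown are hypothetical.
 */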
int perf_env__read_cpuid(struct perf_env *env)
{
char cpuid[128];
int err = get_cpuid(cpuid, sizeof(cpuid));
if (err)
return err;
free(env->cpuid);
env->cpuid = strdup(cpuid);
if (env->cpuid == NULL)
return ENOMEM;
return 0;
}
static int perf_env__read_arch(struct perf_env *env)
{
struct utsname uts;
if (env->arch)
return 0;
if (!uname(&uts))
env->arch = strdup(uts.machine);
return env->arch ? 0 : -ENOMEM;
}
static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
if (env->nr_cpus_avail == 0)
env->nr_cpus_avail = cpu__max_present_cpu().cpu;
return env->nr_cpus_avail ? 0 : -ENOENT;
}
const char *perf_env__raw_arch(struct perf_env *env)
{
return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}
int perf_env__nr_cpus_avail(struct perf_env *env)
{
return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}
void cpu_cache_level__free(struct cpu_cache_level *cache)
{
zfree(&cache->type);
zfree(&cache->map);
zfree(&cache->size);
}
/*
* Return architecture name in a normalized form.
* The conversion logic comes from the Makefile.
*/
static const char *normalize_arch(char *arch)
{
if (!strcmp(arch, "x86_64"))
return "x86";
if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
return "x86";
if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
return "sparc";
if (!strncmp(arch, "aarch64", 7) || !strncmp(arch, "arm64", 5))
return "arm64";
if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
return "arm";
if (!strncmp(arch, "s390", 4))
return "s390";
if (!strncmp(arch, "parisc", 6))
return "parisc";
if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
return "powerpc";
if (!strncmp(arch, "mips", 4))
return "mips";
if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
return "sh";
if (!strncmp(arch, "loongarch", 9))
return "loongarch";
return arch;
}
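/*
 * Illustrative examples (derived from the checks above): "x86_64" and "i686"
 * normalize to "x86", "aarch64" to "arm64", "ppc64le" to "powerpc", while an
 * unrecognized string is returned unchanged.
 */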
const char *perf_env__arch(struct perf_env *env)
{
char *arch_name;
if (!env || !env->arch) { /* Assume local operation */
static struct utsname uts = { .machine[0] = '\0', };
if (uts.machine[0] == '\0' && uname(&uts) < 0)
return NULL;
arch_name = uts.machine;
} else
arch_name = env->arch;
return normalize_arch(arch_name);
}
const char *perf_env__cpuid(struct perf_env *env)
{
int status;
if (!env || !env->cpuid) { /* Assume local operation */
status = perf_env__read_cpuid(env);
if (status)
return NULL;
}
return env->cpuid;
}
int perf_env__nr_pmu_mappings(struct perf_env *env)
{
int status;
if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
status = perf_env__read_pmu_mappings(env);
if (status)
return 0;
}
return env->nr_pmu_mappings;
}
const char *perf_env__pmu_mappings(struct perf_env *env)
{
int status;
if (!env || !env->pmu_mappings) { /* Assume local operation */
status = perf_env__read_pmu_mappings(env);
if (status)
return NULL;
}
return env->pmu_mappings;
}
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
{
if (!env->nr_numa_map) {
struct numa_node *nn;
int i, nr = 0;
for (i = 0; i < env->nr_numa_nodes; i++) {
nn = &env->numa_nodes[i];
nr = max(nr, perf_cpu_map__max(nn->map).cpu);
}
nr++;
/*
* We initialize the numa_map array to prepare
* it for missing cpus, which return node -1
*/
env->numa_map = malloc(nr * sizeof(int));
if (!env->numa_map)
return -1;
for (i = 0; i < nr; i++)
env->numa_map[i] = -1;
env->nr_numa_map = nr;
for (i = 0; i < env->nr_numa_nodes; i++) {
struct perf_cpu tmp;
int j;
nn = &env->numa_nodes[i];
perf_cpu_map__for_each_cpu(tmp, j, nn->map)
env->numa_map[tmp.cpu] = i;
}
}
return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
}
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
const char *cap)
{
char *cap_eq;
int cap_size;
char **ptr;
int i, j;
if (!pmu_name || !cap)
return NULL;
cap_size = strlen(cap);
cap_eq = zalloc(cap_size + 2);
if (!cap_eq)
return NULL;
memcpy(cap_eq, cap, cap_size);
cap_eq[cap_size] = '=';
if (!strcmp(pmu_name, "cpu")) {
for (i = 0; i < env->nr_cpu_pmu_caps; i++) {
if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) {
free(cap_eq);
return &env->cpu_pmu_caps[i][cap_size + 1];
}
}
goto out;
}
for (i = 0; i < env->nr_pmus_with_caps; i++) {
if (strcmp(env->pmu_caps[i].pmu_name, pmu_name))
continue;
ptr = env->pmu_caps[i].caps;
for (j = 0; j < env->pmu_caps[i].nr_caps; j++) {
if (!strncmp(ptr[j], cap_eq, cap_size + 1)) {
free(cap_eq);
return &ptr[j][cap_size + 1];
}
}
}
out:
free(cap_eq);
return NULL;
}
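/*
 * Illustrative usage sketch (not part of the original file): PMU caps are
 * stored as "name=value" strings, so a lookup such as
 *
 *	const char *val = perf_env__find_pmu_cap(env, "cpu", "max_precise");
 *
 * returns a pointer to the text after the '=' or NULL when the cap is not
 * present; the cap name "max_precise" is only an example.
 */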
| linux-master | tools/perf/util/env.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* intel-bts.c: Intel Processor Trace support
* Copyright (c) 2013-2015, Intel Corporation.
*/
#include <endian.h>
#include <errno.h>
#include <byteswap.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include "color.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "tsc.h"
#include "auxtrace.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-bts.h"
#include "util/synthetic-events.h"
#define MAX_TIMESTAMP (~0ULL)
#define INTEL_BTS_ERR_NOINSN 5
#define INTEL_BTS_ERR_LOST 9
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define le64_to_cpu bswap_64
#else
#define le64_to_cpu
#endif
struct intel_bts {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
struct auxtrace_heap heap;
u32 auxtrace_type;
struct perf_session *session;
struct machine *machine;
bool sampling_mode;
bool snapshot_mode;
bool data_queued;
u32 pmu_type;
struct perf_tsc_conversion tc;
bool cap_user_time_zero;
struct itrace_synth_opts synth_opts;
bool sample_branches;
u32 branches_filter;
u64 branches_sample_type;
u64 branches_id;
size_t branches_event_size;
unsigned long num_events;
};
struct intel_bts_queue {
struct intel_bts *bts;
unsigned int queue_nr;
struct auxtrace_buffer *buffer;
bool on_heap;
bool done;
pid_t pid;
pid_t tid;
int cpu;
u64 time;
struct intel_pt_insn intel_pt_insn;
u32 sample_flags;
};
struct branch {
u64 from;
u64 to;
u64 misc;
};
static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
unsigned char *buf, size_t len)
{
struct branch *branch;
size_t i, pos = 0, br_sz = sizeof(struct branch), sz;
const char *color = PERF_COLOR_BLUE;
color_fprintf(stdout, color,
". ... Intel BTS data: size %zu bytes\n",
len);
while (len) {
if (len >= br_sz)
sz = br_sz;
else
sz = len;
printf(".");
color_fprintf(stdout, color, " %08x: ", pos);
for (i = 0; i < sz; i++)
color_fprintf(stdout, color, " %02x", buf[i]);
for (; i < br_sz; i++)
color_fprintf(stdout, color, " ");
if (len >= br_sz) {
branch = (struct branch *)buf;
color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n",
le64_to_cpu(branch->from),
le64_to_cpu(branch->to),
le64_to_cpu(branch->misc) & 0x10 ?
"pred" : "miss");
} else {
color_fprintf(stdout, color, " Bad record!\n");
}
pos += sz;
buf += sz;
len -= sz;
}
}
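/*
 * Illustrative note (not from the original source): each BTS record is three
 * little-endian 64-bit words (from, to, misc), which is why the dump above
 * walks the buffer in sizeof(struct branch) chunks and reports bit 4 of misc
 * as "pred" vs "miss".
 */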
static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
size_t len)
{
printf(".\n");
intel_bts_dump(bts, buf, len);
}
static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
{
union perf_event event;
int err;
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
sample->tid, 0, "Lost trace data", sample->time);
err = perf_session__deliver_synth_event(bts->session, &event, NULL);
if (err)
pr_err("Intel BTS: failed to deliver error event, error %d\n",
err);
return err;
}
static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
unsigned int queue_nr)
{
struct intel_bts_queue *btsq;
btsq = zalloc(sizeof(struct intel_bts_queue));
if (!btsq)
return NULL;
btsq->bts = bts;
btsq->queue_nr = queue_nr;
btsq->pid = -1;
btsq->tid = -1;
btsq->cpu = -1;
return btsq;
}
static int intel_bts_setup_queue(struct intel_bts *bts,
struct auxtrace_queue *queue,
unsigned int queue_nr)
{
struct intel_bts_queue *btsq = queue->priv;
if (list_empty(&queue->head))
return 0;
if (!btsq) {
btsq = intel_bts_alloc_queue(bts, queue_nr);
if (!btsq)
return -ENOMEM;
queue->priv = btsq;
if (queue->cpu != -1)
btsq->cpu = queue->cpu;
btsq->tid = queue->tid;
}
if (bts->sampling_mode)
return 0;
if (!btsq->on_heap && !btsq->buffer) {
int ret;
btsq->buffer = auxtrace_buffer__next(queue, NULL);
if (!btsq->buffer)
return 0;
ret = auxtrace_heap__add(&bts->heap, queue_nr,
btsq->buffer->reference);
if (ret)
return ret;
btsq->on_heap = true;
}
return 0;
}
static int intel_bts_setup_queues(struct intel_bts *bts)
{
unsigned int i;
int ret;
for (i = 0; i < bts->queues.nr_queues; i++) {
ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
i);
if (ret)
return ret;
}
return 0;
}
static inline int intel_bts_update_queues(struct intel_bts *bts)
{
if (bts->queues.new_data) {
bts->queues.new_data = false;
return intel_bts_setup_queues(bts);
}
return 0;
}
static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b)
{
size_t offs, len;
if (len_a > len_b)
offs = len_a - len_b;
else
offs = 0;
for (; offs < len_a; offs += sizeof(struct branch)) {
len = len_a - offs;
if (!memcmp(buf_a + offs, buf_b, len))
return buf_b + len;
}
return buf_b;
}
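/*
 * Worked example (illustrative): if buf_a and buf_b each hold three records
 * and the last two records of buf_a equal the first two records of buf_b,
 * the memcmp() match happens at offs == sizeof(struct branch) and the
 * function returns buf_b advanced past the two overlapping records, so only
 * the genuinely new data is processed.
 */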
static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
struct auxtrace_buffer *b)
{
struct auxtrace_buffer *a;
void *start;
if (b->list.prev == &queue->head)
return 0;
a = list_entry(b->list.prev, struct auxtrace_buffer, list);
start = intel_bts_find_overlap(a->data, a->size, b->data, b->size);
if (!start)
return -EINVAL;
b->use_size = b->data + b->size - start;
b->use_data = start;
return 0;
}
static inline u8 intel_bts_cpumode(struct intel_bts *bts, uint64_t ip)
{
return machine__kernel_ip(bts->machine, ip) ?
PERF_RECORD_MISC_KERNEL :
PERF_RECORD_MISC_USER;
}
static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
struct branch *branch)
{
int ret;
struct intel_bts *bts = btsq->bts;
union perf_event event;
struct perf_sample sample = { .ip = 0, };
if (bts->synth_opts.initial_skip &&
bts->num_events++ <= bts->synth_opts.initial_skip)
return 0;
sample.ip = le64_to_cpu(branch->from);
sample.cpumode = intel_bts_cpumode(bts, sample.ip);
sample.pid = btsq->pid;
sample.tid = btsq->tid;
sample.addr = le64_to_cpu(branch->to);
sample.id = btsq->bts->branches_id;
sample.stream_id = btsq->bts->branches_id;
sample.period = 1;
sample.cpu = btsq->cpu;
sample.flags = btsq->sample_flags;
sample.insn_len = btsq->intel_pt_insn.length;
memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ);
event.sample.header.type = PERF_RECORD_SAMPLE;
event.sample.header.misc = sample.cpumode;
event.sample.header.size = sizeof(struct perf_event_header);
if (bts->synth_opts.inject) {
event.sample.header.size = bts->branches_event_size;
ret = perf_event__synthesize_sample(&event,
bts->branches_sample_type,
0, &sample);
if (ret)
return ret;
}
ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
if (ret)
pr_err("Intel BTS: failed to deliver branch event, error %d\n",
ret);
return ret;
}
static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
struct machine *machine = btsq->bts->machine;
struct thread *thread;
unsigned char buf[INTEL_PT_INSN_BUF_SZ];
ssize_t len;
bool x86_64;
int err = -1;
thread = machine__find_thread(machine, -1, btsq->tid);
if (!thread)
return -1;
len = thread__memcpy(thread, machine, buf, ip, INTEL_PT_INSN_BUF_SZ, &x86_64);
if (len <= 0)
goto out_put;
if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
goto out_put;
err = 0;
out_put:
thread__put(thread);
return err;
}
static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
pid_t tid, u64 ip)
{
union perf_event event;
int err;
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
"Failed to get instruction", 0);
err = perf_session__deliver_synth_event(bts->session, &event, NULL);
if (err)
pr_err("Intel BTS: failed to deliver error event, error %d\n",
err);
return err;
}
static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
struct branch *branch)
{
int err;
if (!branch->from) {
if (branch->to)
btsq->sample_flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_TRACE_BEGIN;
else
btsq->sample_flags = 0;
btsq->intel_pt_insn.length = 0;
} else if (!branch->to) {
btsq->sample_flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_TRACE_END;
btsq->intel_pt_insn.length = 0;
} else {
err = intel_bts_get_next_insn(btsq, branch->from);
if (err) {
btsq->sample_flags = 0;
btsq->intel_pt_insn.length = 0;
if (!btsq->bts->synth_opts.errors)
return 0;
err = intel_bts_synth_error(btsq->bts, btsq->cpu,
btsq->pid, btsq->tid,
branch->from);
return err;
}
btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op);
/* Check for an async branch into the kernel */
if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
machine__kernel_ip(btsq->bts->machine, branch->to) &&
btsq->sample_flags != (PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_CALL |
PERF_IP_FLAG_SYSCALLRET))
btsq->sample_flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_CALL |
PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT;
}
return 0;
}
static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
struct auxtrace_buffer *buffer,
struct thread *thread)
{
struct branch *branch;
size_t sz, bsz = sizeof(struct branch);
u32 filter = btsq->bts->branches_filter;
int err = 0;
if (buffer->use_data) {
sz = buffer->use_size;
branch = buffer->use_data;
} else {
sz = buffer->size;
branch = buffer->data;
}
if (!btsq->bts->sample_branches)
return 0;
for (; sz > bsz; branch += 1, sz -= bsz) {
if (!branch->from && !branch->to)
continue;
intel_bts_get_branch_type(btsq, branch);
if (btsq->bts->synth_opts.thread_stack)
thread_stack__event(thread, btsq->cpu, btsq->sample_flags,
le64_to_cpu(branch->from),
le64_to_cpu(branch->to),
btsq->intel_pt_insn.length,
buffer->buffer_nr + 1, true, 0, 0);
if (filter && !(filter & btsq->sample_flags))
continue;
err = intel_bts_synth_branch_sample(btsq, branch);
if (err)
break;
}
return err;
}
static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
{
struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer;
struct auxtrace_queue *queue;
struct thread *thread;
int err;
if (btsq->done)
return 1;
if (btsq->pid == -1) {
thread = machine__find_thread(btsq->bts->machine, -1,
btsq->tid);
if (thread)
btsq->pid = thread__pid(thread);
} else {
thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
btsq->tid);
}
queue = &btsq->bts->queues.queue_array[btsq->queue_nr];
if (!buffer)
buffer = auxtrace_buffer__next(queue, NULL);
if (!buffer) {
if (!btsq->bts->sampling_mode)
btsq->done = 1;
err = 1;
goto out_put;
}
/* Currently there is no support for split buffers */
if (buffer->consecutive) {
err = -EINVAL;
goto out_put;
}
if (!buffer->data) {
int fd = perf_data__fd(btsq->bts->session->data);
buffer->data = auxtrace_buffer__get_data(buffer, fd);
if (!buffer->data) {
err = -ENOMEM;
goto out_put;
}
}
if (btsq->bts->snapshot_mode && !buffer->consecutive &&
intel_bts_do_fix_overlap(queue, buffer)) {
err = -ENOMEM;
goto out_put;
}
if (!btsq->bts->synth_opts.callchain &&
!btsq->bts->synth_opts.thread_stack && thread &&
(!old_buffer || btsq->bts->sampling_mode ||
(btsq->bts->snapshot_mode && !buffer->consecutive)))
thread_stack__set_trace_nr(thread, btsq->cpu, buffer->buffer_nr + 1);
err = intel_bts_process_buffer(btsq, buffer, thread);
auxtrace_buffer__drop_data(buffer);
btsq->buffer = auxtrace_buffer__next(queue, buffer);
if (btsq->buffer) {
if (timestamp)
*timestamp = btsq->buffer->reference;
} else {
if (!btsq->bts->sampling_mode)
btsq->done = 1;
}
out_put:
thread__put(thread);
return err;
}
static int intel_bts_flush_queue(struct intel_bts_queue *btsq)
{
u64 ts = 0;
int ret;
while (1) {
ret = intel_bts_process_queue(btsq, &ts);
if (ret < 0)
return ret;
if (ret)
break;
}
return 0;
}
static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
{
struct auxtrace_queues *queues = &bts->queues;
unsigned int i;
for (i = 0; i < queues->nr_queues; i++) {
struct auxtrace_queue *queue = &bts->queues.queue_array[i];
struct intel_bts_queue *btsq = queue->priv;
if (btsq && btsq->tid == tid)
return intel_bts_flush_queue(btsq);
}
return 0;
}
static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
{
while (1) {
unsigned int queue_nr;
struct auxtrace_queue *queue;
struct intel_bts_queue *btsq;
u64 ts = 0;
int ret;
if (!bts->heap.heap_cnt)
return 0;
if (bts->heap.heap_array[0].ordinal > timestamp)
return 0;
queue_nr = bts->heap.heap_array[0].queue_nr;
queue = &bts->queues.queue_array[queue_nr];
btsq = queue->priv;
auxtrace_heap__pop(&bts->heap);
ret = intel_bts_process_queue(btsq, &ts);
if (ret < 0) {
auxtrace_heap__add(&bts->heap, queue_nr, ts);
return ret;
}
if (!ret) {
ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
if (ret < 0)
return ret;
} else {
btsq->on_heap = false;
}
}
return 0;
}
static int intel_bts_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
{
struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
auxtrace);
u64 timestamp;
int err;
if (dump_trace)
return 0;
if (!tool->ordered_events) {
pr_err("Intel BTS requires ordered events\n");
return -EINVAL;
}
if (sample->time && sample->time != (u64)-1)
timestamp = perf_time_to_tsc(sample->time, &bts->tc);
else
timestamp = 0;
err = intel_bts_update_queues(bts);
if (err)
return err;
err = intel_bts_process_queues(bts, timestamp);
if (err)
return err;
if (event->header.type == PERF_RECORD_EXIT) {
err = intel_bts_process_tid_exit(bts, event->fork.tid);
if (err)
return err;
}
if (event->header.type == PERF_RECORD_AUX &&
(event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
bts->synth_opts.errors)
err = intel_bts_lost(bts, sample);
return err;
}
static int intel_bts_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool __maybe_unused)
{
struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
auxtrace);
if (bts->sampling_mode)
return 0;
if (!bts->data_queued) {
struct auxtrace_buffer *buffer;
off_t data_offset;
int fd = perf_data__fd(session->data);
int err;
if (perf_data__is_pipe(session->data)) {
data_offset = 0;
} else {
data_offset = lseek(fd, 0, SEEK_CUR);
if (data_offset == -1)
return -errno;
}
err = auxtrace_queues__add_event(&bts->queues, session, event,
data_offset, &buffer);
if (err)
return err;
/* Dump here now we have copied a piped trace out of the pipe */
if (dump_trace) {
if (auxtrace_buffer__get_data(buffer, fd)) {
intel_bts_dump_event(bts, buffer->data,
buffer->size);
auxtrace_buffer__put_data(buffer);
}
}
}
return 0;
}
static int intel_bts_flush(struct perf_session *session,
struct perf_tool *tool __maybe_unused)
{
struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
auxtrace);
int ret;
if (dump_trace || bts->sampling_mode)
return 0;
if (!tool->ordered_events)
return -EINVAL;
ret = intel_bts_update_queues(bts);
if (ret < 0)
return ret;
return intel_bts_process_queues(bts, MAX_TIMESTAMP);
}
static void intel_bts_free_queue(void *priv)
{
struct intel_bts_queue *btsq = priv;
if (!btsq)
return;
free(btsq);
}
static void intel_bts_free_events(struct perf_session *session)
{
struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
auxtrace);
struct auxtrace_queues *queues = &bts->queues;
unsigned int i;
for (i = 0; i < queues->nr_queues; i++) {
intel_bts_free_queue(queues->queue_array[i].priv);
queues->queue_array[i].priv = NULL;
}
auxtrace_queues__free(queues);
}
static void intel_bts_free(struct perf_session *session)
{
struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
auxtrace);
auxtrace_heap__free(&bts->heap);
intel_bts_free_events(session);
session->auxtrace = NULL;
free(bts);
}
static bool intel_bts_evsel_is_auxtrace(struct perf_session *session,
struct evsel *evsel)
{
struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
auxtrace);
return evsel->core.attr.type == bts->pmu_type;
}
struct intel_bts_synth {
struct perf_tool dummy_tool;
struct perf_session *session;
};
static int intel_bts_event_synth(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct intel_bts_synth *intel_bts_synth =
container_of(tool, struct intel_bts_synth, dummy_tool);
return perf_session__deliver_synth_event(intel_bts_synth->session,
event, NULL);
}
static int intel_bts_synth_event(struct perf_session *session,
struct perf_event_attr *attr, u64 id)
{
struct intel_bts_synth intel_bts_synth;
memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth));
intel_bts_synth.session = session;
return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1,
&id, intel_bts_event_synth);
}
static int intel_bts_synth_events(struct intel_bts *bts,
struct perf_session *session)
{
struct evlist *evlist = session->evlist;
struct evsel *evsel;
struct perf_event_attr attr;
bool found = false;
u64 id;
int err;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == bts->pmu_type && evsel->core.ids) {
found = true;
break;
}
}
if (!found) {
pr_debug("There are no selected events with Intel BTS data\n");
return 0;
}
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.size = sizeof(struct perf_event_attr);
attr.type = PERF_TYPE_HARDWARE;
attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
PERF_SAMPLE_PERIOD;
attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
attr.exclude_user = evsel->core.attr.exclude_user;
attr.exclude_kernel = evsel->core.attr.exclude_kernel;
attr.exclude_hv = evsel->core.attr.exclude_hv;
attr.exclude_host = evsel->core.attr.exclude_host;
attr.exclude_guest = evsel->core.attr.exclude_guest;
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
if (bts->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
attr.sample_period = 1;
attr.sample_type |= PERF_SAMPLE_ADDR;
pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
id, (u64)attr.sample_type);
err = intel_bts_synth_event(session, &attr, id);
if (err) {
pr_err("%s: failed to synthesize 'branches' event type\n",
__func__);
return err;
}
bts->sample_branches = true;
bts->branches_sample_type = attr.sample_type;
bts->branches_id = id;
/*
* We only use sample types from PERF_SAMPLE_MASK so we can use
* __evsel__sample_size() here.
*/
bts->branches_event_size = sizeof(struct perf_record_sample) +
__evsel__sample_size(attr.sample_type);
}
return 0;
}
static const char * const intel_bts_info_fmts[] = {
[INTEL_BTS_PMU_TYPE] = " PMU Type %"PRId64"\n",
[INTEL_BTS_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
	[INTEL_BTS_TIME_MULT] = " Time Multiplier %"PRIu64"\n",
[INTEL_BTS_TIME_ZERO] = " Time Zero %"PRIu64"\n",
[INTEL_BTS_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
[INTEL_BTS_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
};
static void intel_bts_print_info(__u64 *arr, int start, int finish)
{
int i;
if (!dump_trace)
return;
for (i = start; i <= finish; i++)
fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
}
int intel_bts_process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE;
struct intel_bts *bts;
int err;
if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
min_sz)
return -EINVAL;
bts = zalloc(sizeof(struct intel_bts));
if (!bts)
return -ENOMEM;
err = auxtrace_queues__init(&bts->queues);
if (err)
goto err_free;
bts->session = session;
bts->machine = &session->machines.host; /* No kvm support */
bts->auxtrace_type = auxtrace_info->type;
bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
bts->cap_user_time_zero =
auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO];
bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];
bts->sampling_mode = false;
bts->auxtrace.process_event = intel_bts_process_event;
bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
bts->auxtrace.flush_events = intel_bts_flush;
bts->auxtrace.free_events = intel_bts_free_events;
bts->auxtrace.free = intel_bts_free;
bts->auxtrace.evsel_is_auxtrace = intel_bts_evsel_is_auxtrace;
session->auxtrace = &bts->auxtrace;
intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE,
INTEL_BTS_SNAPSHOT_MODE);
if (dump_trace)
return 0;
if (session->itrace_synth_opts->set) {
bts->synth_opts = *session->itrace_synth_opts;
} else {
itrace_synth_opts__set_default(&bts->synth_opts,
session->itrace_synth_opts->default_no_sample);
bts->synth_opts.thread_stack =
session->itrace_synth_opts->thread_stack;
}
if (bts->synth_opts.calls)
bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_TRACE_END;
if (bts->synth_opts.returns)
bts->branches_filter |= PERF_IP_FLAG_RETURN |
PERF_IP_FLAG_TRACE_BEGIN;
err = intel_bts_synth_events(bts, session);
if (err)
goto err_free_queues;
err = auxtrace_queues__process_index(&bts->queues, session);
if (err)
goto err_free_queues;
if (bts->queues.populated)
bts->data_queued = true;
return 0;
err_free_queues:
auxtrace_queues__free(&bts->queues);
session->auxtrace = NULL;
err_free:
free(bts);
return err;
}
| linux-master | tools/perf/util/intel-bts.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017, Intel Corporation.
*/
/* Manage metrics and groups of metrics from JSON files */
#include "metricgroup.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "strbuf.h"
#include "pmu.h"
#include "pmus.h"
#include "print-events.h"
#include "smt.h"
#include "expr.h"
#include "rblist.h"
#include <string.h>
#include <errno.h>
#include "strlist.h"
#include <assert.h>
#include <linux/ctype.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include <subcmd/parse-options.h>
#include <api/fs/fs.h>
#include "util.h"
#include <asm/bug.h>
#include "cgroup.h"
#include "util/hashmap.h"
struct metric_event *metricgroup__lookup(struct rblist *metric_events,
struct evsel *evsel,
bool create)
{
struct rb_node *nd;
struct metric_event me = {
.evsel = evsel
};
if (!metric_events)
return NULL;
nd = rblist__find(metric_events, &me);
if (nd)
return container_of(nd, struct metric_event, nd);
if (create) {
rblist__add_node(metric_events, &me);
nd = rblist__find(metric_events, &me);
if (nd)
return container_of(nd, struct metric_event, nd);
}
return NULL;
}
static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
{
struct metric_event *a = container_of(rb_node,
struct metric_event,
nd);
const struct metric_event *b = entry;
if (a->evsel == b->evsel)
return 0;
if ((char *)a->evsel < (char *)b->evsel)
return -1;
return +1;
}
static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
const void *entry)
{
struct metric_event *me = malloc(sizeof(struct metric_event));
if (!me)
return NULL;
memcpy(me, entry, sizeof(struct metric_event));
me->evsel = ((struct metric_event *)entry)->evsel;
me->is_default = false;
INIT_LIST_HEAD(&me->head);
return &me->nd;
}
static void metric_event_delete(struct rblist *rblist __maybe_unused,
struct rb_node *rb_node)
{
struct metric_event *me = container_of(rb_node, struct metric_event, nd);
struct metric_expr *expr, *tmp;
list_for_each_entry_safe(expr, tmp, &me->head, nd) {
zfree(&expr->metric_name);
zfree(&expr->metric_refs);
zfree(&expr->metric_events);
free(expr);
}
free(me);
}
static void metricgroup__rblist_init(struct rblist *metric_events)
{
rblist__init(metric_events);
metric_events->node_cmp = metric_event_cmp;
metric_events->node_new = metric_event_new;
metric_events->node_delete = metric_event_delete;
}
void metricgroup__rblist_exit(struct rblist *metric_events)
{
rblist__exit(metric_events);
}
/**
* The metric under construction. The data held here will be placed in a
* metric_expr.
*/
struct metric {
struct list_head nd;
/**
* The expression parse context importantly holding the IDs contained
* within the expression.
*/
struct expr_parse_ctx *pctx;
const char *pmu;
/** The name of the metric such as "IPC". */
const char *metric_name;
/** Modifier on the metric such as "u" or NULL for none. */
const char *modifier;
/** The expression to parse, for example, "instructions/cycles". */
const char *metric_expr;
/** Optional threshold expression where zero value is green, otherwise red. */
const char *metric_threshold;
/**
* The "ScaleUnit" that scales and adds a unit to the metric during
* output.
*/
const char *metric_unit;
/**
* Optional name of the metric group reported
* if the Default metric group is being processed.
*/
const char *default_metricgroup_name;
/** Optional null terminated array of referenced metrics. */
struct metric_ref *metric_refs;
/**
* Should events of the metric be grouped?
*/
bool group_events;
/**
* Parsed events for the metric. Optional as events may be taken from a
* different metric whose group contains all the IDs necessary for this
* one.
*/
struct evlist *evlist;
};
static void metric__watchdog_constraint_hint(const char *name, bool foot)
{
static bool violate_nmi_constraint;
if (!foot) {
pr_warning("Not grouping metric %s's events.\n", name);
violate_nmi_constraint = true;
return;
}
if (!violate_nmi_constraint)
return;
pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
" perf stat ...\n"
" echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}
static bool metric__group_events(const struct pmu_metric *pm)
{
switch (pm->event_grouping) {
case MetricNoGroupEvents:
return false;
case MetricNoGroupEventsNmi:
if (!sysctl__nmi_watchdog_enabled())
return true;
metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
return false;
case MetricNoGroupEventsSmt:
return !smt_on();
case MetricGroupEvents:
default:
return true;
}
}
static void metric__free(struct metric *m)
{
if (!m)
return;
zfree(&m->metric_refs);
expr__ctx_free(m->pctx);
zfree(&m->modifier);
evlist__delete(m->evlist);
free(m);
}
static struct metric *metric__new(const struct pmu_metric *pm,
const char *modifier,
bool metric_no_group,
int runtime,
const char *user_requested_cpu_list,
bool system_wide)
{
struct metric *m;
m = zalloc(sizeof(*m));
if (!m)
return NULL;
m->pctx = expr__ctx_new();
if (!m->pctx)
goto out_err;
m->pmu = pm->pmu ?: "cpu";
m->metric_name = pm->metric_name;
m->default_metricgroup_name = pm->default_metricgroup_name;
m->modifier = NULL;
if (modifier) {
m->modifier = strdup(modifier);
if (!m->modifier)
goto out_err;
}
m->metric_expr = pm->metric_expr;
m->metric_threshold = pm->metric_threshold;
m->metric_unit = pm->unit;
m->pctx->sctx.user_requested_cpu_list = NULL;
if (user_requested_cpu_list) {
m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
if (!m->pctx->sctx.user_requested_cpu_list)
goto out_err;
}
m->pctx->sctx.runtime = runtime;
m->pctx->sctx.system_wide = system_wide;
m->group_events = !metric_no_group && metric__group_events(pm);
m->metric_refs = NULL;
m->evlist = NULL;
return m;
out_err:
metric__free(m);
return NULL;
}
static bool contains_metric_id(struct evsel **metric_events, int num_events,
const char *metric_id)
{
int i;
for (i = 0; i < num_events; i++) {
if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
return true;
}
return false;
}
/**
* setup_metric_events - Find a group of events in metric_evlist that correspond
* to the IDs from a parsed metric expression.
* @pmu: The PMU for the IDs.
* @ids: the metric IDs to match.
* @metric_evlist: the list of perf events.
* @out_metric_events: holds the created metric events array.
*/
static int setup_metric_events(const char *pmu, struct hashmap *ids,
struct evlist *metric_evlist,
struct evsel ***out_metric_events)
{
struct evsel **metric_events;
const char *metric_id;
struct evsel *ev;
size_t ids_size, matched_events, i;
bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);
*out_metric_events = NULL;
ids_size = hashmap__size(ids);
metric_events = calloc(sizeof(void *), ids_size + 1);
if (!metric_events)
return -ENOMEM;
matched_events = 0;
evlist__for_each_entry(metric_evlist, ev) {
struct expr_id_data *val_ptr;
/* Don't match events for the wrong hybrid PMU. */
if (!all_pmus && ev->pmu_name && evsel__is_hybrid(ev) &&
strcmp(ev->pmu_name, pmu))
continue;
/*
* Check for duplicate events with the same name. For
* example, uncore_imc/cas_count_read/ will turn into 6
* events per socket on skylakex. Only the first such
* event is placed in metric_events.
*/
metric_id = evsel__metric_id(ev);
if (contains_metric_id(metric_events, matched_events, metric_id))
continue;
/*
* Does this event belong to the parse context? For
* combined or shared groups, this metric may not care
* about this event.
*/
if (hashmap__find(ids, metric_id, &val_ptr)) {
pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
metric_events[matched_events++] = ev;
if (matched_events >= ids_size)
break;
}
}
if (matched_events < ids_size) {
free(metric_events);
return -EINVAL;
}
for (i = 0; i < ids_size; i++) {
ev = metric_events[i];
ev->collect_stat = true;
/*
* The metric leader points to the identically named
* event in metric_events.
*/
ev->metric_leader = ev;
/*
* Mark two events with identical names in the same
* group (or globally) as being in use as uncore events
* may be duplicated for each pmu. Set the metric leader
* of such events to be the event that appears in
* metric_events.
*/
metric_id = evsel__metric_id(ev);
evlist__for_each_entry_continue(metric_evlist, ev) {
if (!strcmp(evsel__metric_id(ev), metric_id))
ev->metric_leader = metric_events[i];
}
}
*out_metric_events = metric_events;
return 0;
}
static bool match_metric(const char *n, const char *list)
{
int len;
char *m;
if (!list)
return false;
if (!strcmp(list, "all"))
return true;
if (!n)
return !strcasecmp(list, "No_group");
len = strlen(list);
m = strcasestr(n, list);
if (!m)
return false;
if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
(m[len] == 0 || m[len] == ';'))
return true;
return false;
}
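/*
 * Illustrative examples (hypothetical group strings): with
 * n == "TopdownL1;Default", match_metric(n, "topdownl1") is true because the
 * case-insensitive match starts at n and is followed by ';', whereas
 * match_metric("TopdownL1_group", "TopdownL1") is false because the match is
 * followed by '_'.
 */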
static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric)
{
const char *pm_pmu = pm->pmu ?: "cpu";
if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
return false;
return match_metric(pm->metric_group, metric) ||
match_metric(pm->metric_name, metric);
}
/** struct mep - RB-tree node for building printing information. */
struct mep {
/** nd - RB-tree element. */
struct rb_node nd;
	/** @metric_group: Owned metric group name, separated from others with ';'. */
char *metric_group;
const char *metric_name;
const char *metric_desc;
const char *metric_long_desc;
const char *metric_expr;
const char *metric_threshold;
const char *metric_unit;
};
static int mep_cmp(struct rb_node *rb_node, const void *entry)
{
struct mep *a = container_of(rb_node, struct mep, nd);
struct mep *b = (struct mep *)entry;
int ret;
ret = strcmp(a->metric_group, b->metric_group);
if (ret)
return ret;
return strcmp(a->metric_name, b->metric_name);
}
static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
{
struct mep *me = malloc(sizeof(struct mep));
if (!me)
return NULL;
memcpy(me, entry, sizeof(struct mep));
return &me->nd;
}
static void mep_delete(struct rblist *rl __maybe_unused,
struct rb_node *nd)
{
struct mep *me = container_of(nd, struct mep, nd);
zfree(&me->metric_group);
free(me);
}
static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
const char *metric_name)
{
struct rb_node *nd;
struct mep me = {
.metric_group = strdup(metric_group),
.metric_name = metric_name,
};
nd = rblist__find(groups, &me);
if (nd) {
free(me.metric_group);
return container_of(nd, struct mep, nd);
}
rblist__add_node(groups, &me);
nd = rblist__find(groups, &me);
if (nd)
return container_of(nd, struct mep, nd);
return NULL;
}
static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
struct rblist *groups)
{
const char *g;
char *omg, *mg;
mg = strdup(pm->metric_group ?: "No_group");
if (!mg)
return -ENOMEM;
omg = mg;
while ((g = strsep(&mg, ";")) != NULL) {
struct mep *me;
g = skip_spaces(g);
if (strlen(g))
me = mep_lookup(groups, g, pm->metric_name);
else
me = mep_lookup(groups, "No_group", pm->metric_name);
if (me) {
me->metric_desc = pm->desc;
me->metric_long_desc = pm->long_desc;
me->metric_expr = pm->metric_expr;
me->metric_threshold = pm->metric_threshold;
me->metric_unit = pm->unit;
}
}
free(omg);
return 0;
}
struct metricgroup_iter_data {
pmu_metric_iter_fn fn;
void *data;
};
static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
const struct pmu_metrics_table *table,
void *data)
{
struct metricgroup_iter_data *d = data;
struct perf_pmu *pmu = NULL;
if (!pm->metric_expr || !pm->compat)
return 0;
while ((pmu = perf_pmus__scan(pmu))) {
if (!pmu->id || strcmp(pmu->id, pm->compat))
continue;
return d->fn(pm, table, d->data);
}
return 0;
}
static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
const struct pmu_metrics_table *table __maybe_unused,
void *vdata)
{
struct rblist *groups = vdata;
return metricgroup__add_to_mep_groups(pm, groups);
}
void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
{
struct rblist groups;
const struct pmu_metrics_table *table;
struct rb_node *node, *next;
rblist__init(&groups);
groups.node_new = mep_new;
groups.node_cmp = mep_cmp;
groups.node_delete = mep_delete;
table = pmu_metrics_table__find();
if (table) {
pmu_metrics_table__for_each_metric(table,
metricgroup__add_to_mep_groups_callback,
&groups);
}
{
struct metricgroup_iter_data data = {
.fn = metricgroup__add_to_mep_groups_callback,
.data = &groups,
};
pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
}
for (node = rb_first_cached(&groups.entries); node; node = next) {
struct mep *me = container_of(node, struct mep, nd);
print_cb->print_metric(print_state,
me->metric_group,
me->metric_name,
me->metric_desc,
me->metric_long_desc,
me->metric_expr,
me->metric_threshold,
me->metric_unit);
next = rb_next(node);
rblist__remove_node(&groups, node);
}
}
static const char *code_characters = ",-=@";
static int encode_metric_id(struct strbuf *sb, const char *x)
{
char *c;
int ret = 0;
for (; *x; x++) {
c = strchr(code_characters, *x);
if (c) {
ret = strbuf_addch(sb, '!');
if (ret)
break;
ret = strbuf_addch(sb, '0' + (c - code_characters));
if (ret)
break;
} else {
ret = strbuf_addch(sb, *x);
if (ret)
break;
}
}
return ret;
}
static int decode_metric_id(struct strbuf *sb, const char *x)
{
const char *orig = x;
size_t i;
char c;
int ret;
for (; *x; x++) {
c = *x;
if (*x == '!') {
x++;
i = *x - '0';
if (i > strlen(code_characters)) {
pr_err("Bad metric-id encoding in: '%s'", orig);
return -1;
}
c = code_characters[i];
}
ret = strbuf_addch(sb, c);
if (ret)
return ret;
}
return 0;
}
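/*
 * Worked example (illustrative): with code_characters == ",-=@", the id
 * "msr@tsc@" is encoded by encode_metric_id() as "msr!3tsc!3" (since '@' is
 * at index 3), and decode_metric_id() reverses the transformation.
 */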
static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
{
struct evsel *ev;
struct strbuf sb = STRBUF_INIT;
char *cur;
int ret = 0;
evlist__for_each_entry(perf_evlist, ev) {
if (!ev->metric_id)
continue;
ret = strbuf_setlen(&sb, 0);
if (ret)
break;
ret = decode_metric_id(&sb, ev->metric_id);
if (ret)
break;
free((char *)ev->metric_id);
ev->metric_id = strdup(sb.buf);
if (!ev->metric_id) {
ret = -ENOMEM;
break;
}
/*
* If the name is just the parsed event, use the metric-id to
* give a more friendly display version.
*/
if (strstr(ev->name, "metric-id=")) {
bool has_slash = false;
zfree(&ev->name);
for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
*cur = '/';
has_slash = true;
}
if (modifier) {
if (!has_slash && !strchr(sb.buf, ':')) {
ret = strbuf_addch(&sb, ':');
if (ret)
break;
}
ret = strbuf_addstr(&sb, modifier);
if (ret)
break;
}
ev->name = strdup(sb.buf);
if (!ev->name) {
ret = -ENOMEM;
break;
}
}
}
strbuf_release(&sb);
return ret;
}
static int metricgroup__build_event_string(struct strbuf *events,
const struct expr_parse_ctx *ctx,
const char *modifier,
bool group_events)
{
struct hashmap_entry *cur;
size_t bkt;
bool no_group = true, has_tool_events = false;
bool tool_events[PERF_TOOL_MAX] = {false};
int ret = 0;
#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
hashmap__for_each_entry(ctx->ids, cur, bkt) {
const char *sep, *rsep, *id = cur->pkey;
enum perf_tool_event ev;
pr_debug("found event %s\n", id);
/* Always move tool events outside of the group. */
ev = perf_tool_event__from_str(id);
if (ev != PERF_TOOL_NONE) {
has_tool_events = true;
tool_events[ev] = true;
continue;
}
/* Separate events with commas and open the group if necessary. */
if (no_group) {
if (group_events) {
ret = strbuf_addch(events, '{');
RETURN_IF_NON_ZERO(ret);
}
no_group = false;
} else {
ret = strbuf_addch(events, ',');
RETURN_IF_NON_ZERO(ret);
}
/*
* Encode the ID as an event string. Add a qualifier for
* metric_id that is the original name except with characters
* that parse-events can't parse replaced. For example,
* 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
*/
sep = strchr(id, '@');
if (sep != NULL) {
ret = strbuf_add(events, id, sep - id);
RETURN_IF_NON_ZERO(ret);
ret = strbuf_addch(events, '/');
RETURN_IF_NON_ZERO(ret);
rsep = strrchr(sep, '@');
ret = strbuf_add(events, sep + 1, rsep - sep - 1);
RETURN_IF_NON_ZERO(ret);
ret = strbuf_addstr(events, ",metric-id=");
RETURN_IF_NON_ZERO(ret);
sep = rsep;
} else {
sep = strchr(id, ':');
if (sep != NULL) {
ret = strbuf_add(events, id, sep - id);
RETURN_IF_NON_ZERO(ret);
} else {
ret = strbuf_addstr(events, id);
RETURN_IF_NON_ZERO(ret);
}
ret = strbuf_addstr(events, "/metric-id=");
RETURN_IF_NON_ZERO(ret);
}
ret = encode_metric_id(events, id);
RETURN_IF_NON_ZERO(ret);
ret = strbuf_addstr(events, "/");
RETURN_IF_NON_ZERO(ret);
if (sep != NULL) {
ret = strbuf_addstr(events, sep + 1);
RETURN_IF_NON_ZERO(ret);
}
if (modifier) {
ret = strbuf_addstr(events, modifier);
RETURN_IF_NON_ZERO(ret);
}
}
if (!no_group && group_events) {
ret = strbuf_addf(events, "}:W");
RETURN_IF_NON_ZERO(ret);
}
if (has_tool_events) {
int i;
perf_tool_event__for_each_event(i) {
if (tool_events[i]) {
if (!no_group) {
ret = strbuf_addch(events, ',');
RETURN_IF_NON_ZERO(ret);
}
no_group = false;
ret = strbuf_addstr(events, perf_tool_event__to_str(i));
RETURN_IF_NON_ZERO(ret);
}
}
}
return ret;
#undef RETURN_IF_NON_ZERO
}
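/*
* Illustrative example (hashmap iteration order is unspecified): for the ids
* "inst_retired.any" and "cpu_clk_unhalted.thread" with group_events set, the
* built string looks like
* "{inst_retired.any/metric-id=inst_retired.any/,cpu_clk_unhalted.thread/metric-id=cpu_clk_unhalted.thread/}:W".
*/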
int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
{
return 1;
}
/*
* A singly linked list on the stack of the names of metrics being
* processed. Used to identify recursion.
*/
struct visited_metric {
const char *name;
const struct visited_metric *parent;
};
struct metricgroup_add_iter_data {
struct list_head *metric_list;
const char *pmu;
const char *metric_name;
const char *modifier;
int *ret;
bool *has_match;
bool metric_no_group;
bool metric_no_threshold;
const char *user_requested_cpu_list;
bool system_wide;
struct metric *root_metric;
const struct visited_metric *visited;
const struct pmu_metrics_table *table;
};
static bool metricgroup__find_metric(const char *pmu,
const char *metric,
const struct pmu_metrics_table *table,
struct pmu_metric *pm);
static int add_metric(struct list_head *metric_list,
const struct pmu_metric *pm,
const char *modifier,
bool metric_no_group,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
struct metric *root_metric,
const struct visited_metric *visited,
const struct pmu_metrics_table *table);
/**
* resolve_metric - Locate metrics within the root metric and recursively add
* references to them.
* @metric_list: The list the metric is added to.
* @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
* @modifier: if non-null event modifiers like "u".
* @metric_no_group: Should events written to events be grouped "{}" or
* global. Grouping is the default but due to multiplexing the
*                   user may override.
* @metric_no_threshold: Should threshold expressions be ignored?
* @user_requested_cpu_list: Command line specified CPUs to record on.
* @system_wide: Are events for all processes recorded.
* @root_metric: Metrics may reference other metrics to form a tree. In this
* case the root_metric holds all the IDs and a list of referenced
* metrics. When adding a root this argument is NULL.
* @visited: A singly linked list of metric names being added that is used to
* detect recursion.
* @table: The table that is searched for metrics, most commonly the table for the
* architecture perf is running upon.
*/
static int resolve_metric(struct list_head *metric_list,
const char *pmu,
const char *modifier,
bool metric_no_group,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
struct metric *root_metric,
const struct visited_metric *visited,
const struct pmu_metrics_table *table)
{
struct hashmap_entry *cur;
size_t bkt;
struct to_resolve {
/* The metric to resolve. */
struct pmu_metric pm;
/*
* The key in the IDs map; this may differ in case, etc.
* from pm->metric_name.
*/
const char *key;
} *pending = NULL;
int i, ret = 0, pending_cnt = 0;
/*
* Iterate all the parsed IDs and if there's a matching metric add it to
* the pending array.
*/
hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
struct pmu_metric pm;
if (metricgroup__find_metric(pmu, cur->pkey, table, &pm)) {
pending = realloc(pending,
(pending_cnt + 1) * sizeof(struct to_resolve));
if (!pending)
return -ENOMEM;
memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
pending[pending_cnt].key = cur->pkey;
pending_cnt++;
}
}
/* Remove the metric IDs from the context. */
for (i = 0; i < pending_cnt; i++)
expr__del_id(root_metric->pctx, pending[i].key);
/*
* Recursively add all the metrics, IDs are added to the root metric's
* context.
*/
for (i = 0; i < pending_cnt; i++) {
ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
metric_no_threshold, user_requested_cpu_list, system_wide,
root_metric, visited, table);
if (ret)
break;
}
free(pending);
return ret;
}
/**
* __add_metric - Add a metric to metric_list.
* @metric_list: The list the metric is added to.
* @pm: The pmu_metric containing the metric to be added.
* @modifier: if non-null event modifiers like "u".
* @metric_no_group: Should events written to events be grouped "{}" or
* global. Grouping is the default but due to multiplexing the
* user may override.
* @metric_no_threshold: Should threshold expressions be ignored?
* @runtime: A special argument for the parser only known at runtime.
* @user_requested_cpu_list: Command line specified CPUs to record on.
* @system_wide: Are events for all processes recorded.
* @root_metric: Metrics may reference other metrics to form a tree. In this
* case the root_metric holds all the IDs and a list of referenced
* metrics. When adding a root this argument is NULL.
* @visited: A singly linked list of metric names being added that is used to
* detect recursion.
* @table: The table that is searched for metrics, most commonly the table for the
* architecture perf is running upon.
*/
static int __add_metric(struct list_head *metric_list,
const struct pmu_metric *pm,
const char *modifier,
bool metric_no_group,
bool metric_no_threshold,
int runtime,
const char *user_requested_cpu_list,
bool system_wide,
struct metric *root_metric,
const struct visited_metric *visited,
const struct pmu_metrics_table *table)
{
const struct visited_metric *vm;
int ret;
bool is_root = !root_metric;
const char *expr;
struct visited_metric visited_node = {
.name = pm->metric_name,
.parent = visited,
};
for (vm = visited; vm; vm = vm->parent) {
if (!strcmp(pm->metric_name, vm->name)) {
pr_err("failed: recursion detected for %s\n", pm->metric_name);
return -1;
}
}
if (is_root) {
/*
* This metric is the root of a tree and may reference other
* metrics that are added recursively.
*/
root_metric = metric__new(pm, modifier, metric_no_group, runtime,
user_requested_cpu_list, system_wide);
if (!root_metric)
return -ENOMEM;
} else {
int cnt = 0;
/*
* This metric was referenced in a metric higher in the
* tree. Check if the same metric is already resolved in the
* metric_refs list.
*/
if (root_metric->metric_refs) {
for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
if (!strcmp(pm->metric_name,
root_metric->metric_refs[cnt].metric_name))
return 0;
}
}
/* Create reference. Need space for the entry and the terminator. */
root_metric->metric_refs = realloc(root_metric->metric_refs,
(cnt + 2) * sizeof(struct metric_ref));
if (!root_metric->metric_refs)
return -ENOMEM;
/*
* Intentionally passing just const char pointers,
* from the 'pm' object, so they never go away. We don't
* need to change them, so there's no need to create
* our own copy.
*/
root_metric->metric_refs[cnt].metric_name = pm->metric_name;
root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;
/* Null terminate array. */
root_metric->metric_refs[cnt+1].metric_name = NULL;
root_metric->metric_refs[cnt+1].metric_expr = NULL;
}
/*
* For both the parent and referenced metrics, we parse
* all the metric's IDs and add it to the root context.
*/
ret = 0;
expr = pm->metric_expr;
if (is_root && pm->metric_threshold) {
/*
* Threshold expressions are built off the actual metric. Switch
* to use that in case of additional necessary events. Change
* the visited node name to avoid this being flagged as
* recursion. If the threshold events are disabled, just use the
* metric's name as a reference. This allows metric threshold
* computation if there are sufficient events.
*/
assert(strstr(pm->metric_threshold, pm->metric_name));
expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
visited_node.name = "__threshold__";
}
if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
/* Broken metric. */
ret = -EINVAL;
}
if (!ret) {
/* Resolve referenced metrics. */
const char *pmu = pm->pmu ?: "cpu";
ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
metric_no_threshold, user_requested_cpu_list,
system_wide, root_metric, &visited_node,
table);
}
if (ret) {
if (is_root)
metric__free(root_metric);
} else if (is_root)
list_add(&root_metric->nd, metric_list);
return ret;
}
struct metricgroup__find_metric_data {
const char *pmu;
const char *metric;
struct pmu_metric *pm;
};
static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
const struct pmu_metrics_table *table __maybe_unused,
void *vdata)
{
struct metricgroup__find_metric_data *data = vdata;
const char *pm_pmu = pm->pmu ?: "cpu";
if (strcmp(data->pmu, "all") && strcmp(pm_pmu, data->pmu))
return 0;
if (!match_metric(pm->metric_name, data->metric))
return 0;
memcpy(data->pm, pm, sizeof(*pm));
return 1;
}
static bool metricgroup__find_metric(const char *pmu,
const char *metric,
const struct pmu_metrics_table *table,
struct pmu_metric *pm)
{
struct metricgroup__find_metric_data data = {
.pmu = pmu,
.metric = metric,
.pm = pm,
};
return pmu_metrics_table__for_each_metric(table, metricgroup__find_metric_callback, &data)
? true : false;
}
static int add_metric(struct list_head *metric_list,
const struct pmu_metric *pm,
const char *modifier,
bool metric_no_group,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
struct metric *root_metric,
const struct visited_metric *visited,
const struct pmu_metrics_table *table)
{
int ret = 0;
pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);
if (!strstr(pm->metric_expr, "?")) {
ret = __add_metric(metric_list, pm, modifier, metric_no_group,
metric_no_threshold, 0, user_requested_cpu_list,
system_wide, root_metric, visited, table);
} else {
int j, count;
count = arch_get_runtimeparam(pm);
/*
* This loop creates multiple events depending on the count value and
* adds those events to metric_list.
*/
for (j = 0; j < count && !ret; j++)
ret = __add_metric(metric_list, pm, modifier, metric_no_group,
metric_no_threshold, j, user_requested_cpu_list,
system_wide, root_metric, visited, table);
}
return ret;
}
static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
const struct pmu_metrics_table *table __maybe_unused,
void *data)
{
struct metricgroup_add_iter_data *d = data;
int ret;
if (!match_pm_metric(pm, d->pmu, d->metric_name))
return 0;
ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
d->metric_no_threshold, d->user_requested_cpu_list,
d->system_wide, d->root_metric, d->visited, d->table);
if (ret)
goto out;
*(d->has_match) = true;
out:
*(d->ret) = ret;
return ret;
}
/**
* metric_list_cmp - list_sort comparator that sorts metrics with more events to
*                   the front. Tool events are excluded from the count.
*/
static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
const struct list_head *r)
{
const struct metric *left = container_of(l, struct metric, nd);
const struct metric *right = container_of(r, struct metric, nd);
struct expr_id_data *data;
int i, left_count, right_count;
left_count = hashmap__size(left->pctx->ids);
perf_tool_event__for_each_event(i) {
if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data))
left_count--;
}
right_count = hashmap__size(right->pctx->ids);
perf_tool_event__for_each_event(i) {
if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data))
right_count--;
}
return right_count - left_count;
}
/**
* default_metricgroup_cmp - Implements complex key for the Default metricgroup
* that first sorts by default_metricgroup_name, then
* metric_name.
*/
static int default_metricgroup_cmp(void *priv __maybe_unused,
const struct list_head *l,
const struct list_head *r)
{
const struct metric *left = container_of(l, struct metric, nd);
const struct metric *right = container_of(r, struct metric, nd);
int diff = strcmp(right->default_metricgroup_name, left->default_metricgroup_name);
if (diff)
return diff;
return strcmp(right->metric_name, left->metric_name);
}
struct metricgroup__add_metric_data {
struct list_head *list;
const char *pmu;
const char *metric_name;
const char *modifier;
const char *user_requested_cpu_list;
bool metric_no_group;
bool metric_no_threshold;
bool system_wide;
bool has_match;
};
static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
const struct pmu_metrics_table *table,
void *vdata)
{
struct metricgroup__add_metric_data *data = vdata;
int ret = 0;
if (pm->metric_expr && match_pm_metric(pm, data->pmu, data->metric_name)) {
bool metric_no_group = data->metric_no_group ||
match_metric(pm->metricgroup_no_group, data->metric_name);
data->has_match = true;
ret = add_metric(data->list, pm, data->modifier, metric_no_group,
data->metric_no_threshold, data->user_requested_cpu_list,
data->system_wide, /*root_metric=*/NULL,
/*visited_metrics=*/NULL, table);
}
return ret;
}
/**
* metricgroup__add_metric - Find and add a metric, or a metric group.
* @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
* @metric_name: The name of the metric or metric group. For example, "IPC"
* could be the name of a metric and "TopDownL1" the name of a
* metric group.
* @modifier: if non-null event modifiers like "u".
* @metric_no_group: Should events written to events be grouped "{}" or
* global. Grouping is the default but due to multiplexing the
* user may override.
* @user_requested_cpu_list: Command line specified CPUs to record on.
* @system_wide: Are events for all processes recorded.
* @metric_list: The list that the metric or metric group are added to.
* @table: The table that is searched for metrics, most commonly the table for the
* architecture perf is running upon.
*/
static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
bool metric_no_group, bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
struct list_head *metric_list,
const struct pmu_metrics_table *table)
{
LIST_HEAD(list);
int ret;
bool has_match = false;
{
struct metricgroup__add_metric_data data = {
.list = &list,
.pmu = pmu,
.metric_name = metric_name,
.modifier = modifier,
.metric_no_group = metric_no_group,
.metric_no_threshold = metric_no_threshold,
.user_requested_cpu_list = user_requested_cpu_list,
.system_wide = system_wide,
.has_match = false,
};
/*
* Iterate over all metrics seeing if metric matches either the
* name or group. When it does add the metric to the list.
*/
ret = pmu_metrics_table__for_each_metric(table, metricgroup__add_metric_callback,
&data);
if (ret)
goto out;
has_match = data.has_match;
}
{
struct metricgroup_iter_data data = {
.fn = metricgroup__add_metric_sys_event_iter,
.data = (void *) &(struct metricgroup_add_iter_data) {
.metric_list = &list,
.pmu = pmu,
.metric_name = metric_name,
.modifier = modifier,
.metric_no_group = metric_no_group,
.user_requested_cpu_list = user_requested_cpu_list,
.system_wide = system_wide,
.has_match = &has_match,
.ret = &ret,
.table = table,
},
};
pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
}
/* End of pmu events. */
if (!has_match)
ret = -EINVAL;
out:
/*
* add the metrics to metric_list so that they can be released
* even if parsing failed
*/
list_splice(&list, metric_list);
return ret;
}
/**
* metricgroup__add_metric_list - Find and add metrics, or metric groups,
* specified in a list.
* @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
* @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
* would match the IPC and CPI metrics, and TopDownL1 would match all
* the metrics in the TopDownL1 group.
* @metric_no_group: Should events written to events be grouped "{}" or
* global. Grouping is the default but due to multiplexing the
* user may override.
* @user_requested_cpu_list: Command line specified CPUs to record on.
* @system_wide: Are events for all processes recorded.
* @metric_list: The list that metrics are added to.
* @table: The table that is searched for metrics, most commonly the table for the
* architecture perf is running upon.
*/
static int metricgroup__add_metric_list(const char *pmu, const char *list,
bool metric_no_group,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide, struct list_head *metric_list,
const struct pmu_metrics_table *table)
{
char *list_itr, *list_copy, *metric_name, *modifier;
int ret, count = 0;
list_copy = strdup(list);
if (!list_copy)
return -ENOMEM;
list_itr = list_copy;
while ((metric_name = strsep(&list_itr, ",")) != NULL) {
modifier = strchr(metric_name, ':');
if (modifier)
*modifier++ = '\0';
ret = metricgroup__add_metric(pmu, metric_name, modifier,
metric_no_group, metric_no_threshold,
user_requested_cpu_list,
system_wide, metric_list, table);
if (ret == -EINVAL)
pr_err("Cannot find metric or group `%s'\n", metric_name);
if (ret)
break;
count++;
}
free(list_copy);
if (!ret) {
/*
* Warn about nmi_watchdog if any parsed metrics had the
* NO_NMI_WATCHDOG constraint.
*/
metric__watchdog_constraint_hint(NULL, /*foot=*/true);
/* No metrics. */
if (count == 0)
return -EINVAL;
}
return ret;
}
static void metricgroup__free_metrics(struct list_head *metric_list)
{
struct metric *m, *tmp;
list_for_each_entry_safe (m, tmp, metric_list, nd) {
list_del_init(&m->nd);
metric__free(m);
}
}
/**
* find_tool_events - Search for the presence of tool events in metric_list.
* @metric_list: List to take metrics from.
* @tool_events: Array of false values, indices corresponding to tool events set
* to true if tool event is found.
*/
static void find_tool_events(const struct list_head *metric_list,
bool tool_events[PERF_TOOL_MAX])
{
struct metric *m;
list_for_each_entry(m, metric_list, nd) {
int i;
perf_tool_event__for_each_event(i) {
struct expr_id_data *data;
if (!tool_events[i] &&
!expr__get_id(m->pctx, perf_tool_event__to_str(i), &data))
tool_events[i] = true;
}
}
}
/**
* build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
*                           metric IDs; as the IDs are held in a set,
* duplicates will be removed.
* @metric_list: List to take metrics from.
* @combined: Out argument for result.
*/
static int build_combined_expr_ctx(const struct list_head *metric_list,
struct expr_parse_ctx **combined)
{
struct hashmap_entry *cur;
size_t bkt;
struct metric *m;
char *dup;
int ret;
*combined = expr__ctx_new();
if (!*combined)
return -ENOMEM;
list_for_each_entry(m, metric_list, nd) {
if (!m->group_events && !m->modifier) {
hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
dup = strdup(cur->pkey);
if (!dup) {
ret = -ENOMEM;
goto err_out;
}
ret = expr__add_id(*combined, dup);
if (ret)
goto err_out;
}
}
}
return 0;
err_out:
expr__ctx_free(*combined);
*combined = NULL;
return ret;
}
/**
* parse_ids - Build the event string for the ids and parse them creating an
* evlist. The encoded metric_ids are decoded.
* @metric_no_merge: is metric sharing explicitly disabled.
* @fake_pmu: used when testing metrics not supported by the current CPU.
* @ids: the event identifiers parsed from a metric.
* @modifier: any modifiers added to the events.
* @group_events: should events be placed in a weak group.
* @tool_events: entries set true if the tool event of index could be present in
* the overall list of metrics.
* @out_evlist: the created list of events.
*/
static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
struct expr_parse_ctx *ids, const char *modifier,
bool group_events, const bool tool_events[PERF_TOOL_MAX],
struct evlist **out_evlist)
{
struct parse_events_error parse_error;
struct evlist *parsed_evlist;
struct strbuf events = STRBUF_INIT;
int ret;
*out_evlist = NULL;
if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
bool added_event = false;
int i;
/*
* We may fail to share events between metrics because a tool
* event isn't present in one metric. For example, a ratio of
* cache misses doesn't need duration_time but the same events
* may be used for a misses per second. Events without sharing
* implies multiplexing, that is best avoided, so place
* all tool events in every group.
*
* Also, there may be no ids/events in the expression parsing
* context because of constant evaluation, e.g.:
* event1 if #smt_on else 0
* Add a tool event to avoid a parse error on an empty string.
*/
perf_tool_event__for_each_event(i) {
if (tool_events[i]) {
char *tmp = strdup(perf_tool_event__to_str(i));
if (!tmp)
return -ENOMEM;
ids__insert(ids->ids, tmp);
added_event = true;
}
}
if (!added_event && hashmap__size(ids->ids) == 0) {
char *tmp = strdup("duration_time");
if (!tmp)
return -ENOMEM;
ids__insert(ids->ids, tmp);
}
}
ret = metricgroup__build_event_string(&events, ids, modifier,
group_events);
if (ret)
return ret;
parsed_evlist = evlist__new();
if (!parsed_evlist) {
ret = -ENOMEM;
goto err_out;
}
pr_debug("Parsing metric events '%s'\n", events.buf);
parse_events_error__init(&parse_error);
ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
&parse_error, fake_pmu, /*warn_if_reordered=*/false);
if (ret) {
parse_events_error__print(&parse_error, events.buf);
goto err_out;
}
ret = decode_all_metric_ids(parsed_evlist, modifier);
if (ret)
goto err_out;
*out_evlist = parsed_evlist;
parsed_evlist = NULL;
err_out:
parse_events_error__exit(&parse_error);
evlist__delete(parsed_evlist);
strbuf_release(&events);
return ret;
}
static int parse_groups(struct evlist *perf_evlist,
const char *pmu, const char *str,
bool metric_no_group,
bool metric_no_merge,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
struct perf_pmu *fake_pmu,
struct rblist *metric_events_list,
const struct pmu_metrics_table *table)
{
struct evlist *combined_evlist = NULL;
LIST_HEAD(metric_list);
struct metric *m;
bool tool_events[PERF_TOOL_MAX] = {false};
bool is_default = !strcmp(str, "Default");
int ret;
if (metric_events_list->nr_entries == 0)
metricgroup__rblist_init(metric_events_list);
ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
user_requested_cpu_list,
system_wide, &metric_list, table);
if (ret)
goto out;
/* Sort metrics from largest to smallest. */
list_sort(NULL, &metric_list, metric_list_cmp);
if (!metric_no_merge) {
struct expr_parse_ctx *combined = NULL;
find_tool_events(&metric_list, tool_events);
ret = build_combined_expr_ctx(&metric_list, &combined);
if (!ret && combined && hashmap__size(combined->ids)) {
ret = parse_ids(metric_no_merge, fake_pmu, combined,
/*modifier=*/NULL,
/*group_events=*/false,
tool_events,
&combined_evlist);
}
if (combined)
expr__ctx_free(combined);
if (ret)
goto out;
}
if (is_default)
list_sort(NULL, &metric_list, default_metricgroup_cmp);
list_for_each_entry(m, &metric_list, nd) {
struct metric_event *me;
struct evsel **metric_events;
struct evlist *metric_evlist = NULL;
struct metric *n;
struct metric_expr *expr;
if (combined_evlist && !m->group_events) {
metric_evlist = combined_evlist;
} else if (!metric_no_merge) {
/*
* See if the IDs for this metric are a subset of an
* earlier metric.
*/
list_for_each_entry(n, &metric_list, nd) {
if (m == n)
break;
if (n->evlist == NULL)
continue;
if ((!m->modifier && n->modifier) ||
(m->modifier && !n->modifier) ||
(m->modifier && n->modifier &&
strcmp(m->modifier, n->modifier)))
continue;
if ((!m->pmu && n->pmu) ||
(m->pmu && !n->pmu) ||
(m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
continue;
if (expr__subset_of_ids(n->pctx, m->pctx)) {
pr_debug("Events in '%s' fully contained within '%s'\n",
m->metric_name, n->metric_name);
metric_evlist = n->evlist;
break;
}
}
}
if (!metric_evlist) {
ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
m->group_events, tool_events, &m->evlist);
if (ret)
goto out;
metric_evlist = m->evlist;
}
ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
metric_evlist, &metric_events);
if (ret) {
pr_err("Cannot resolve IDs for %s: %s\n",
m->metric_name, m->metric_expr);
goto out;
}
me = metricgroup__lookup(metric_events_list, metric_events[0], true);
expr = malloc(sizeof(struct metric_expr));
if (!expr) {
ret = -ENOMEM;
free(metric_events);
goto out;
}
expr->metric_refs = m->metric_refs;
m->metric_refs = NULL;
expr->metric_expr = m->metric_expr;
if (m->modifier) {
char *tmp;
if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
expr->metric_name = NULL;
else
expr->metric_name = tmp;
} else
expr->metric_name = strdup(m->metric_name);
if (!expr->metric_name) {
ret = -ENOMEM;
free(metric_events);
goto out;
}
expr->metric_threshold = m->metric_threshold;
expr->metric_unit = m->metric_unit;
expr->metric_events = metric_events;
expr->runtime = m->pctx->sctx.runtime;
expr->default_metricgroup_name = m->default_metricgroup_name;
me->is_default = is_default;
list_add(&expr->nd, &me->head);
}
if (combined_evlist) {
evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
evlist__delete(combined_evlist);
}
list_for_each_entry(m, &metric_list, nd) {
if (m->evlist)
evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
}
out:
metricgroup__free_metrics(&metric_list);
return ret;
}
int metricgroup__parse_groups(struct evlist *perf_evlist,
const char *pmu,
const char *str,
bool metric_no_group,
bool metric_no_merge,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
struct rblist *metric_events)
{
const struct pmu_metrics_table *table = pmu_metrics_table__find();
if (!table)
return -EINVAL;
return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
metric_no_threshold, user_requested_cpu_list, system_wide,
/*fake_pmu=*/NULL, metric_events, table);
}
int metricgroup__parse_groups_test(struct evlist *evlist,
const struct pmu_metrics_table *table,
const char *str,
struct rblist *metric_events)
{
return parse_groups(evlist, "all", str,
/*metric_no_group=*/false,
/*metric_no_merge=*/false,
/*metric_no_threshold=*/false,
/*user_requested_cpu_list=*/NULL,
/*system_wide=*/false,
&perf_pmu__fake, metric_events, table);
}
struct metricgroup__has_metric_data {
const char *pmu;
const char *metric;
};
static int metricgroup__has_metric_callback(const struct pmu_metric *pm,
const struct pmu_metrics_table *table __maybe_unused,
void *vdata)
{
struct metricgroup__has_metric_data *data = vdata;
return match_pm_metric(pm, data->pmu, data->metric) ? 1 : 0;
}
bool metricgroup__has_metric(const char *pmu, const char *metric)
{
const struct pmu_metrics_table *table = pmu_metrics_table__find();
struct metricgroup__has_metric_data data = {
.pmu = pmu,
.metric = metric,
};
if (!table)
return false;
return pmu_metrics_table__for_each_metric(table, metricgroup__has_metric_callback, &data)
? true : false;
}
static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
const struct pmu_metrics_table *table __maybe_unused,
void *data)
{
unsigned int *max_level = data;
unsigned int level;
const char *p = strstr(pm->metric_group ?: "", "TopdownL");
if (!p || p[8] == '\0')
return 0;
level = p[8] - '0';
if (level > *max_level)
*max_level = level;
return 0;
}
unsigned int metricgroups__topdown_max_level(void)
{
unsigned int max_level = 0;
const struct pmu_metrics_table *table = pmu_metrics_table__find();
if (!table)
return false;
pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
&max_level);
return max_level;
}
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
struct rblist *new_metric_events,
struct rblist *old_metric_events)
{
unsigned int i;
for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
struct rb_node *nd;
struct metric_event *old_me, *new_me;
struct metric_expr *old_expr, *new_expr;
struct evsel *evsel;
size_t alloc_size;
int idx, nr;
nd = rblist__entry(old_metric_events, i);
old_me = container_of(nd, struct metric_event, nd);
evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
if (!evsel)
return -EINVAL;
new_me = metricgroup__lookup(new_metric_events, evsel, true);
if (!new_me)
return -ENOMEM;
pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
list_for_each_entry(old_expr, &old_me->head, nd) {
new_expr = malloc(sizeof(*new_expr));
if (!new_expr)
return -ENOMEM;
new_expr->metric_expr = old_expr->metric_expr;
new_expr->metric_threshold = old_expr->metric_threshold;
new_expr->metric_name = strdup(old_expr->metric_name);
if (!new_expr->metric_name)
return -ENOMEM;
new_expr->metric_unit = old_expr->metric_unit;
new_expr->runtime = old_expr->runtime;
if (old_expr->metric_refs) {
/* calculate number of metric_events */
for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
continue;
alloc_size = sizeof(*new_expr->metric_refs);
new_expr->metric_refs = calloc(nr + 1, alloc_size);
if (!new_expr->metric_refs) {
free(new_expr);
return -ENOMEM;
}
memcpy(new_expr->metric_refs, old_expr->metric_refs,
nr * alloc_size);
} else {
new_expr->metric_refs = NULL;
}
/* calculate number of metric_events */
for (nr = 0; old_expr->metric_events[nr]; nr++)
continue;
alloc_size = sizeof(*new_expr->metric_events);
new_expr->metric_events = calloc(nr + 1, alloc_size);
if (!new_expr->metric_events) {
zfree(&new_expr->metric_refs);
free(new_expr);
return -ENOMEM;
}
/* copy evsel in the same position */
for (idx = 0; idx < nr; idx++) {
evsel = old_expr->metric_events[idx];
evsel = evlist__find_evsel(evlist, evsel->core.idx);
if (evsel == NULL) {
zfree(&new_expr->metric_events);
zfree(&new_expr->metric_refs);
free(new_expr);
return -EINVAL;
}
new_expr->metric_events[idx] = evsel;
}
list_add(&new_expr->nd, &new_me->head);
}
}
return 0;
}
| linux-master | tools/perf/util/metricgroup.c |
// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include "util/strlist.h"
#include <bpf/bpf.h>
#include "bpf_skel/off_cpu.skel.h"
#define MAX_STACKS 32
#define MAX_PROC 4096
/* we don't need an actual timestamp, we just want the samples to sort last */
#define OFF_CPU_TIMESTAMP (~0ull << 32)
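/*
* Each sample synthesized in off_cpu_write() uses OFF_CPU_TIMESTAMP plus an
* increasing offset in the low 32 bits, so later samples sort after earlier
* ones and all of them sort after every real sample in the data file.
*/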
static struct off_cpu_bpf *skel;
struct off_cpu_key {
u32 pid;
u32 tgid;
u32 stack_id;
u32 state;
u64 cgroup_id;
};
union off_cpu_data {
struct perf_event_header hdr;
u64 array[1024 / sizeof(u64)];
};
static int off_cpu_config(struct evlist *evlist)
{
struct evsel *evsel;
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_BPF_OUTPUT,
.size = sizeof(attr), /* to capture ABI version */
};
char *evname = strdup(OFFCPU_EVENT);
if (evname == NULL)
return -ENOMEM;
evsel = evsel__new(&attr);
if (!evsel) {
free(evname);
return -ENOMEM;
}
evsel->core.attr.freq = 1;
evsel->core.attr.sample_period = 1;
/* off-cpu analysis depends on stack trace */
evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;
evlist__add(evlist, evsel);
free(evsel->name);
evsel->name = evname;
return 0;
}
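/*
* The event added above is a software PERF_COUNT_SW_BPF_OUTPUT event renamed
* to OFFCPU_EVENT; off_cpu_write() later emits the values accumulated in the
* off_cpu BPF map as this event's sample periods.
*/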
static void off_cpu_start(void *arg)
{
struct evlist *evlist = arg;
/* update task filter for the given workload */
if (!skel->bss->has_cpu && !skel->bss->has_task &&
perf_thread_map__pid(evlist->core.threads, 0) != -1) {
int fd;
u32 pid;
u8 val = 1;
skel->bss->has_task = 1;
skel->bss->uses_tgid = 1;
fd = bpf_map__fd(skel->maps.task_filter);
pid = perf_thread_map__pid(evlist->core.threads, 0);
bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
}
skel->bss->enabled = 1;
}
static void off_cpu_finish(void *arg __maybe_unused)
{
skel->bss->enabled = 0;
off_cpu_bpf__destroy(skel);
}
/* The v5.18 kernel added a prev_state arg, so check the tracepoint signature */
static void check_sched_switch_args(void)
{
const struct btf *btf = bpf_object__btf(skel->obj);
const struct btf_type *t1, *t2, *t3;
u32 type_id;
type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
BTF_KIND_TYPEDEF);
if ((s32)type_id < 0)
return;
t1 = btf__type_by_id(btf, type_id);
if (t1 == NULL)
return;
t2 = btf__type_by_id(btf, t1->type);
if (t2 == NULL || !btf_is_ptr(t2))
return;
t3 = btf__type_by_id(btf, t2->type);
if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
/* new format: pass prev_state as 4th arg */
skel->rodata->has_prev_state = true;
}
}
int off_cpu_prepare(struct evlist *evlist, struct target *target,
struct record_opts *opts)
{
int err, fd, i;
int ncpus = 1, ntasks = 1, ncgrps = 1;
struct strlist *pid_slist = NULL;
struct str_node *pos;
if (off_cpu_config(evlist) < 0) {
pr_err("Failed to config off-cpu BPF event\n");
return -1;
}
skel = off_cpu_bpf__open();
if (!skel) {
pr_err("Failed to open off-cpu BPF skeleton\n");
return -1;
}
/* don't need to set cpu filter for system-wide mode */
if (target->cpu_list) {
ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
}
if (target->pid) {
pid_slist = strlist__new(target->pid, NULL);
if (!pid_slist) {
pr_err("Failed to create a strlist for pid\n");
return -1;
}
ntasks = 0;
strlist__for_each_entry(pos, pid_slist) {
char *end_ptr;
int pid = strtol(pos->s, &end_ptr, 10);
if (pid == INT_MIN || pid == INT_MAX ||
(*end_ptr != '\0' && *end_ptr != ','))
continue;
ntasks++;
}
if (ntasks < MAX_PROC)
ntasks = MAX_PROC;
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
} else if (target__has_task(target)) {
ntasks = perf_thread_map__nr(evlist->core.threads);
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
} else if (target__none(target)) {
bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
}
if (evlist__first(evlist)->cgrp) {
ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);
if (!cgroup_is_v2("perf_event"))
skel->rodata->uses_cgroup_v1 = true;
}
if (opts->record_cgroup) {
skel->rodata->needs_cgroup = true;
if (!cgroup_is_v2("perf_event"))
skel->rodata->uses_cgroup_v1 = true;
}
set_max_rlimit();
check_sched_switch_args();
err = off_cpu_bpf__load(skel);
if (err) {
pr_err("Failed to load off-cpu skeleton\n");
goto out;
}
if (target->cpu_list) {
u32 cpu;
u8 val = 1;
skel->bss->has_cpu = 1;
fd = bpf_map__fd(skel->maps.cpu_filter);
for (i = 0; i < ncpus; i++) {
cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
}
}
if (target->pid) {
u8 val = 1;
skel->bss->has_task = 1;
skel->bss->uses_tgid = 1;
fd = bpf_map__fd(skel->maps.task_filter);
strlist__for_each_entry(pos, pid_slist) {
char *end_ptr;
u32 tgid;
int pid = strtol(pos->s, &end_ptr, 10);
if (pid == INT_MIN || pid == INT_MAX ||
(*end_ptr != '\0' && *end_ptr != ','))
continue;
tgid = pid;
bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
}
} else if (target__has_task(target)) {
u32 pid;
u8 val = 1;
skel->bss->has_task = 1;
fd = bpf_map__fd(skel->maps.task_filter);
for (i = 0; i < ntasks; i++) {
pid = perf_thread_map__pid(evlist->core.threads, i);
bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
}
}
if (evlist__first(evlist)->cgrp) {
struct evsel *evsel;
u8 val = 1;
skel->bss->has_cgroup = 1;
fd = bpf_map__fd(skel->maps.cgroup_filter);
evlist__for_each_entry(evlist, evsel) {
struct cgroup *cgrp = evsel->cgrp;
if (cgrp == NULL)
continue;
if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
pr_err("Failed to read cgroup id of %s\n",
cgrp->name);
goto out;
}
bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
}
}
err = off_cpu_bpf__attach(skel);
if (err) {
pr_err("Failed to attach off-cpu BPF skeleton\n");
goto out;
}
if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
pr_err("Failed to attach off-cpu skeleton\n");
goto out;
}
return 0;
out:
off_cpu_bpf__destroy(skel);
return -1;
}
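/*
* Synthesize one PERF_RECORD_SAMPLE per entry of the off_cpu BPF map. The
* sample fields are laid out in data.array[] in sample_type order
* (IDENTIFIER, IP, TID, TIME, ID, CPU, PERIOD, CALLCHAIN, CGROUP); the IP is
* later overwritten with the first callchain entry and a dummy increasing
* timestamp makes these samples sort last.
*/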
int off_cpu_write(struct perf_session *session)
{
int bytes = 0, size;
int fd, stack;
u64 sample_type, val, sid = 0;
struct evsel *evsel;
struct perf_data_file *file = &session->data->file;
struct off_cpu_key prev, key;
union off_cpu_data data = {
.hdr = {
.type = PERF_RECORD_SAMPLE,
.misc = PERF_RECORD_MISC_USER,
},
};
u64 tstamp = OFF_CPU_TIMESTAMP;
skel->bss->enabled = 0;
evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
if (evsel == NULL) {
pr_err("%s evsel not found\n", OFFCPU_EVENT);
return 0;
}
sample_type = evsel->core.attr.sample_type;
if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
pr_err("not supported sample type: %llx\n",
(unsigned long long)sample_type);
return -1;
}
if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
if (evsel->core.id)
sid = evsel->core.id[0];
}
fd = bpf_map__fd(skel->maps.off_cpu);
stack = bpf_map__fd(skel->maps.stacks);
memset(&prev, 0, sizeof(prev));
while (!bpf_map_get_next_key(fd, &prev, &key)) {
int n = 1; /* start from perf_event_header */
int ip_pos = -1;
bpf_map_lookup_elem(fd, &key, &val);
if (sample_type & PERF_SAMPLE_IDENTIFIER)
data.array[n++] = sid;
if (sample_type & PERF_SAMPLE_IP) {
ip_pos = n;
data.array[n++] = 0; /* will be updated */
}
if (sample_type & PERF_SAMPLE_TID)
data.array[n++] = (u64)key.pid << 32 | key.tgid;
if (sample_type & PERF_SAMPLE_TIME)
data.array[n++] = tstamp;
if (sample_type & PERF_SAMPLE_ID)
data.array[n++] = sid;
if (sample_type & PERF_SAMPLE_CPU)
data.array[n++] = 0;
if (sample_type & PERF_SAMPLE_PERIOD)
data.array[n++] = val;
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int len = 0;
/* data.array[n] is callchain->nr (updated later) */
data.array[n + 1] = PERF_CONTEXT_USER;
data.array[n + 2] = 0;
bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
while (data.array[n + 2 + len])
len++;
/* update length of callchain */
data.array[n] = len + 1;
/* update sample ip with the first callchain entry */
if (ip_pos >= 0)
data.array[ip_pos] = data.array[n + 2];
/* calculate sample callchain data array length */
n += len + 2;
}
if (sample_type & PERF_SAMPLE_CGROUP)
data.array[n++] = key.cgroup_id;
size = n * sizeof(u64);
data.hdr.size = size;
bytes += size;
if (perf_data_file__write(file, &data, size) < 0) {
pr_err("failed to write perf data, error: %m\n");
return bytes;
}
prev = key;
/* increase dummy timestamp to sort later samples */
tstamp++;
}
return bytes;
}
| linux-master | tools/perf/util/bpf_off_cpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* genelf_debug.c
* Copyright (C) 2015, Google, Inc
*
* Contributed by:
* Stephane Eranian <[email protected]>
*
* based on GPLv2 source code from Oprofile
* @remark Copyright 2007 OProfile authors
* @author Philippe Elie
*/
#include <linux/compiler.h>
#include <linux/zalloc.h>
#include <sys/types.h>
#include <stdio.h>
#include <getopt.h>
#include <stddef.h>
#include <libelf.h>
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <limits.h>
#include <fcntl.h>
#include <err.h>
#include <dwarf.h>
#include "genelf.h"
#include "../util/jitdump.h"
#define BUFFER_EXT_DFL_SIZE (4 * 1024)
typedef uint32_t uword;
typedef uint16_t uhalf;
typedef int32_t sword;
typedef int16_t shalf;
typedef uint8_t ubyte;
typedef int8_t sbyte;
struct buffer_ext {
size_t cur_pos;
size_t max_sz;
void *data;
};
static void
buffer_ext_dump(struct buffer_ext *be, const char *msg)
{
size_t i;
warnx("DUMP for %s", msg);
for (i = 0 ; i < be->cur_pos; i++)
warnx("%4zu 0x%02x", i, (((char *)be->data)[i]) & 0xff);
}
static inline int
buffer_ext_add(struct buffer_ext *be, void *addr, size_t sz)
{
void *tmp;
size_t be_sz = be->max_sz;
retry:
if ((be->cur_pos + sz) < be_sz) {
memcpy(be->data + be->cur_pos, addr, sz);
be->cur_pos += sz;
return 0;
}
if (!be_sz)
be_sz = BUFFER_EXT_DFL_SIZE;
else
be_sz <<= 1;
tmp = realloc(be->data, be_sz);
if (!tmp)
return -1;
be->data = tmp;
be->max_sz = be_sz;
goto retry;
}
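/*
* Note: the buffer grows by doubling (starting at BUFFER_EXT_DFL_SIZE) and the
* copy is retried, so a single large add may trigger several reallocations.
*/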
static void
buffer_ext_init(struct buffer_ext *be)
{
be->data = NULL;
be->cur_pos = 0;
be->max_sz = 0;
}
static void
buffer_ext_exit(struct buffer_ext *be)
{
zfree(&be->data);
}
static inline size_t
buffer_ext_size(struct buffer_ext *be)
{
return be->cur_pos;
}
static inline void *
buffer_ext_addr(struct buffer_ext *be)
{
return be->data;
}
struct debug_line_header {
// Not counting this field
uword total_length;
// version number (2 currently)
uhalf version;
// relative offset from next field to
// program statement
uword prolog_length;
ubyte minimum_instruction_length;
ubyte default_is_stmt;
// line_base - see DWARF 2 specs
sbyte line_base;
// line_range - see DWARF 2 specs
ubyte line_range;
// number of opcode + 1
ubyte opcode_base;
/* follow the array of opcode args nr: ubytes [nr_opcode_base] */
/* follow the search directories: zero terminated strings, the
* list is terminated by an empty string.
*/
/* follow an array of { filename, LEB128, LEB128, LEB128 }, first is
* the directory index entry, 0 means current directory, then mtime
* and filesize, last entry is followed by an empty string.
*/
/* follow the first program statement */
} __packed;
/* The DWARF 2 spec talks about only one possible compilation unit header while
* binutils can handle two flavours of DWARF 2, 32 and 64 bits; this is not
* related to the target arch, a 32-bit ELF can hold more than 4 GB of debug
* information. For now we handle only DWARF 2 32-bit comp units. It'll only
* become a problem if we generate more than 4 GB of debug information.
*/
struct compilation_unit_header {
uword total_length;
uhalf version;
uword debug_abbrev_offset;
ubyte pointer_size;
} __packed;
#define DW_LNS_num_opcode (DW_LNS_set_isa + 1)
/* field filled at run time are marked with -1 */
static struct debug_line_header const default_debug_line_header = {
.total_length = -1,
.version = 2,
.prolog_length = -1,
.minimum_instruction_length = 1, /* could be better when min instruction size != 1 */
.default_is_stmt = 1, /* we don't take care about basic block */
.line_base = -5, /* sensible value for line base ... */
.line_range = -14, /* ... and line range are guessed statically */
.opcode_base = DW_LNS_num_opcode
};
static ubyte standard_opcode_length[] =
{
0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1
};
#if 0
{
[DW_LNS_advance_pc] = 1,
[DW_LNS_advance_line] = 1,
[DW_LNS_set_file] = 1,
[DW_LNS_set_column] = 1,
[DW_LNS_fixed_advance_pc] = 1,
[DW_LNS_set_isa] = 1,
};
#endif
/* field filled at run time are marked with -1 */
static struct compilation_unit_header default_comp_unit_header = {
.total_length = -1,
.version = 2,
.debug_abbrev_offset = 0, /* we reuse the same abbrev entries for all comp unit */
.pointer_size = sizeof(void *)
};
static void emit_uword(struct buffer_ext *be, uword data)
{
buffer_ext_add(be, &data, sizeof(uword));
}
static void emit_string(struct buffer_ext *be, const char *s)
{
buffer_ext_add(be, (void *)s, strlen(s) + 1);
}
static void emit_unsigned_LEB128(struct buffer_ext *be,
unsigned long data)
{
do {
ubyte cur = data & 0x7F;
data >>= 7;
if (data)
cur |= 0x80;
buffer_ext_add(be, &cur, 1);
} while (data);
}
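/*
* Example: emit_unsigned_LEB128(be, 300) emits the two bytes 0xac 0x02
* (low seven bits first, high bit set on all but the last byte).
*/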
static void emit_signed_LEB128(struct buffer_ext *be, long data)
{
int more = 1;
int negative = data < 0;
int size = sizeof(long) * CHAR_BIT;
while (more) {
ubyte cur = data & 0x7F;
data >>= 7;
if (negative)
data |= - (1 << (size - 7));
if ((data == 0 && !(cur & 0x40)) ||
(data == -1l && (cur & 0x40)))
more = 0;
else
cur |= 0x80;
buffer_ext_add(be, &cur, 1);
}
}
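/*
* Example: emit_signed_LEB128(be, 2) emits 0x02 while emit_signed_LEB128(be, -2)
* emits 0x7e; on decode, bit 0x40 of the final byte carries the sign.
*/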
static void emit_extended_opcode(struct buffer_ext *be, ubyte opcode,
void *data, size_t data_len)
{
buffer_ext_add(be, (char *)"", 1);
emit_unsigned_LEB128(be, data_len + 1);
buffer_ext_add(be, &opcode, 1);
buffer_ext_add(be, data, data_len);
}
static void emit_opcode(struct buffer_ext *be, ubyte opcode)
{
buffer_ext_add(be, &opcode, 1);
}
static void emit_opcode_signed(struct buffer_ext *be,
ubyte opcode, long data)
{
buffer_ext_add(be, &opcode, 1);
emit_signed_LEB128(be, data);
}
static void emit_opcode_unsigned(struct buffer_ext *be, ubyte opcode,
unsigned long data)
{
buffer_ext_add(be, &opcode, 1);
emit_unsigned_LEB128(be, data);
}
static void emit_advance_pc(struct buffer_ext *be, unsigned long delta_pc)
{
emit_opcode_unsigned(be, DW_LNS_advance_pc, delta_pc);
}
static void emit_advance_lineno(struct buffer_ext *be, long delta_lineno)
{
emit_opcode_signed(be, DW_LNS_advance_line, delta_lineno);
}
static void emit_lne_end_of_sequence(struct buffer_ext *be)
{
emit_extended_opcode(be, DW_LNE_end_sequence, NULL, 0);
}
static void emit_set_file(struct buffer_ext *be, unsigned long idx)
{
emit_opcode_unsigned(be, DW_LNS_set_file, idx);
}
static void emit_lne_define_filename(struct buffer_ext *be,
const char *filename)
{
buffer_ext_add(be, (void *)"", 1);
/* LNE field, strlen(filename) + zero termination, 3 bytes for: the dir entry, timestamp, filesize */
emit_unsigned_LEB128(be, strlen(filename) + 5);
emit_opcode(be, DW_LNE_define_file);
emit_string(be, filename);
/* directory index 0=do not know */
emit_unsigned_LEB128(be, 0);
/* last modification date on file 0=do not know */
emit_unsigned_LEB128(be, 0);
/* filesize 0=do not know */
emit_unsigned_LEB128(be, 0);
}
static void emit_lne_set_address(struct buffer_ext *be,
void *address)
{
emit_extended_opcode(be, DW_LNE_set_address, &address, sizeof(unsigned long));
}
static ubyte get_special_opcode(struct debug_entry *ent,
unsigned int last_line,
unsigned long last_vma)
{
unsigned int temp;
unsigned long delta_addr;
/*
* delta from line_base
*/
temp = (ent->lineno - last_line) - default_debug_line_header.line_base;
if (temp >= default_debug_line_header.line_range)
return 0;
/*
* delta of addresses
*/
delta_addr = (ent->addr - last_vma) / default_debug_line_header.minimum_instruction_length;
/* This is not sufficient to ensure the opcode will be in [0-255] but it is
* sufficient to ensure that, when summing with the delta lineno, we will
* not overflow the unsigned long opcode */
if (delta_addr <= 256 / default_debug_line_header.line_range) {
unsigned long opcode = temp +
(delta_addr * default_debug_line_header.line_range) +
default_debug_line_header.opcode_base;
return opcode <= 255 ? opcode : 0;
}
return 0;
}
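/*
* A special opcode packs both deltas into one byte:
* opcode = (line delta - line_base) + (address delta * line_range) + opcode_base,
* and it is only used when the result fits in [opcode_base, 255].
*/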
static void emit_lineno_info(struct buffer_ext *be,
struct debug_entry *ent, size_t nr_entry,
unsigned long code_addr)
{
size_t i;
/* as described in the jitdump format */
const char repeated_name_marker[] = {'\xff', '\0'};
/*
* Machine state at start of a statement program
* address = 0
* file = 1
* line = 1
* column = 0
* is_stmt = default_is_stmt as given in the debug_line_header
* basic block = 0
* end sequence = 0
*/
/* start state of the state machine we take care of */
unsigned long last_vma = 0;
char const *cur_filename = NULL;
unsigned long cur_file_idx = 0;
int last_line = 1;
emit_lne_set_address(be, (void *)code_addr);
for (i = 0; i < nr_entry; i++, ent = debug_entry_next(ent)) {
int need_copy = 0;
ubyte special_opcode;
/*
* check if filename changed, if so add it
*/
if ((!cur_filename || strcmp(cur_filename, ent->name)) &&
strcmp(repeated_name_marker, ent->name)) {
emit_lne_define_filename(be, ent->name);
cur_filename = ent->name;
emit_set_file(be, ++cur_file_idx);
need_copy = 1;
}
special_opcode = get_special_opcode(ent, last_line, last_vma);
if (special_opcode != 0) {
last_line = ent->lineno;
last_vma = ent->addr;
emit_opcode(be, special_opcode);
} else {
/*
* lines differ, emit line delta
*/
if (last_line != ent->lineno) {
emit_advance_lineno(be, ent->lineno - last_line);
last_line = ent->lineno;
need_copy = 1;
}
/*
* addresses differ, emit address delta
*/
if (last_vma != ent->addr) {
emit_advance_pc(be, ent->addr - last_vma);
last_vma = ent->addr;
need_copy = 1;
}
/*
* add new row to matrix
*/
if (need_copy)
emit_opcode(be, DW_LNS_copy);
}
}
}
static void add_debug_line(struct buffer_ext *be,
struct debug_entry *ent, size_t nr_entry,
unsigned long code_addr)
{
struct debug_line_header * dbg_header;
size_t old_size;
old_size = buffer_ext_size(be);
buffer_ext_add(be, (void *)&default_debug_line_header,
sizeof(default_debug_line_header));
buffer_ext_add(be, &standard_opcode_length, sizeof(standard_opcode_length));
// empty directory entry
buffer_ext_add(be, (void *)"", 1);
// empty filename directory
buffer_ext_add(be, (void *)"", 1);
dbg_header = buffer_ext_addr(be) + old_size;
dbg_header->prolog_length = (buffer_ext_size(be) - old_size) -
offsetof(struct debug_line_header, minimum_instruction_length);
emit_lineno_info(be, ent, nr_entry, code_addr);
emit_lne_end_of_sequence(be);
dbg_header = buffer_ext_addr(be) + old_size;
dbg_header->total_length = (buffer_ext_size(be) - old_size) -
offsetof(struct debug_line_header, version);
}
static void
add_debug_abbrev(struct buffer_ext *be)
{
emit_unsigned_LEB128(be, 1);
emit_unsigned_LEB128(be, DW_TAG_compile_unit);
emit_unsigned_LEB128(be, DW_CHILDREN_yes);
emit_unsigned_LEB128(be, DW_AT_stmt_list);
emit_unsigned_LEB128(be, DW_FORM_data4);
emit_unsigned_LEB128(be, 0);
emit_unsigned_LEB128(be, 0);
emit_unsigned_LEB128(be, 0);
}
static void
add_compilation_unit(struct buffer_ext *be,
size_t offset_debug_line)
{
struct compilation_unit_header *comp_unit_header;
size_t old_size = buffer_ext_size(be);
buffer_ext_add(be, &default_comp_unit_header,
sizeof(default_comp_unit_header));
emit_unsigned_LEB128(be, 1);
emit_uword(be, offset_debug_line);
comp_unit_header = buffer_ext_addr(be) + old_size;
comp_unit_header->total_length = (buffer_ext_size(be) - old_size) -
offsetof(struct compilation_unit_header, version);
}
static int
jit_process_debug_info(uint64_t code_addr,
void *debug, int nr_debug_entries,
struct buffer_ext *dl,
struct buffer_ext *da,
struct buffer_ext *di)
{
struct debug_entry *ent = debug;
int i;
for (i = 0; i < nr_debug_entries; i++) {
ent->addr = ent->addr - code_addr;
ent = debug_entry_next(ent);
}
add_compilation_unit(di, buffer_ext_size(dl));
add_debug_line(dl, debug, nr_debug_entries, GEN_ELF_TEXT_OFFSET);
add_debug_abbrev(da);
if (0) buffer_ext_dump(da, "abbrev");
return 0;
}
int
jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_entries)
{
Elf_Data *d;
Elf_Scn *scn;
Elf_Shdr *shdr;
struct buffer_ext dl, di, da;
int ret = -1;
buffer_ext_init(&dl);
buffer_ext_init(&di);
buffer_ext_init(&da);
if (jit_process_debug_info(code_addr, debug, nr_debug_entries, &dl, &da, &di))
goto out;
/*
* setup .debug_line section
*/
scn = elf_newscn(e);
if (!scn) {
warnx("cannot create section");
goto out;
}
d = elf_newdata(scn);
if (!d) {
warnx("cannot get new data");
goto out;
}
d->d_align = 1;
d->d_off = 0LL;
d->d_buf = buffer_ext_addr(&dl);
d->d_type = ELF_T_BYTE;
d->d_size = buffer_ext_size(&dl);
d->d_version = EV_CURRENT;
shdr = elf_getshdr(scn);
if (!shdr) {
warnx("cannot get section header");
goto out;
}
shdr->sh_name = 52; /* .debug_line */
shdr->sh_type = SHT_PROGBITS;
shdr->sh_addr = 0; /* must be zero or == sh_offset -> dynamic object */
shdr->sh_flags = 0;
shdr->sh_entsize = 0;
/*
* setup .debug_info section
*/
scn = elf_newscn(e);
if (!scn) {
warnx("cannot create section");
goto out;
}
d = elf_newdata(scn);
if (!d) {
warnx("cannot get new data");
goto out;
}
d->d_align = 1;
d->d_off = 0LL;
d->d_buf = buffer_ext_addr(&di);
d->d_type = ELF_T_BYTE;
d->d_size = buffer_ext_size(&di);
d->d_version = EV_CURRENT;
shdr = elf_getshdr(scn);
if (!shdr) {
warnx("cannot get section header");
goto out;
}
shdr->sh_name = 64; /* .debug_info */
shdr->sh_type = SHT_PROGBITS;
shdr->sh_addr = 0; /* must be zero or == sh_offset -> dynamic object */
shdr->sh_flags = 0;
shdr->sh_entsize = 0;
/*
* setup .debug_abbrev section
*/
scn = elf_newscn(e);
if (!scn) {
warnx("cannot create section");
goto out;
}
d = elf_newdata(scn);
if (!d) {
warnx("cannot get new data");
goto out;
}
d->d_align = 1;
d->d_off = 0LL;
d->d_buf = buffer_ext_addr(&da);
d->d_type = ELF_T_BYTE;
d->d_size = buffer_ext_size(&da);
d->d_version = EV_CURRENT;
shdr = elf_getshdr(scn);
if (!shdr) {
warnx("cannot get section header");
goto out;
}
shdr->sh_name = 76; /* .debug_abbrev */
shdr->sh_type = SHT_PROGBITS;
shdr->sh_addr = 0; /* must be zero or == sh_offset -> dynamic object */
shdr->sh_flags = 0;
shdr->sh_entsize = 0;
/*
* now we update the ELF image with all the sections
*/
if (elf_update(e, ELF_C_WRITE) < 0)
warnx("elf_update debug failed");
else
ret = 0;
out:
buffer_ext_exit(&dl);
buffer_ext_exit(&di);
buffer_ext_exit(&da);
return ret;
}
| linux-master | tools/perf/util/genelf_debug.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "term.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/util.h"
#include "tracepoint.h"
#define MAX_NAME_LEN 100
#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
static int get_config_terms(struct list_head *head_config,
struct list_head *head_terms __maybe_unused);
struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = {
.symbol = "cpu-cycles",
.alias = "cycles",
},
[PERF_COUNT_HW_INSTRUCTIONS] = {
.symbol = "instructions",
.alias = "",
},
[PERF_COUNT_HW_CACHE_REFERENCES] = {
.symbol = "cache-references",
.alias = "",
},
[PERF_COUNT_HW_CACHE_MISSES] = {
.symbol = "cache-misses",
.alias = "",
},
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
.symbol = "branch-instructions",
.alias = "branches",
},
[PERF_COUNT_HW_BRANCH_MISSES] = {
.symbol = "branch-misses",
.alias = "",
},
[PERF_COUNT_HW_BUS_CYCLES] = {
.symbol = "bus-cycles",
.alias = "",
},
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
.symbol = "stalled-cycles-frontend",
.alias = "idle-cycles-frontend",
},
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
.symbol = "stalled-cycles-backend",
.alias = "idle-cycles-backend",
},
[PERF_COUNT_HW_REF_CPU_CYCLES] = {
.symbol = "ref-cycles",
.alias = "",
},
};
struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
[PERF_COUNT_SW_CPU_CLOCK] = {
.symbol = "cpu-clock",
.alias = "",
},
[PERF_COUNT_SW_TASK_CLOCK] = {
.symbol = "task-clock",
.alias = "",
},
[PERF_COUNT_SW_PAGE_FAULTS] = {
.symbol = "page-faults",
.alias = "faults",
},
[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
.symbol = "context-switches",
.alias = "cs",
},
[PERF_COUNT_SW_CPU_MIGRATIONS] = {
.symbol = "cpu-migrations",
.alias = "migrations",
},
[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
.symbol = "minor-faults",
.alias = "",
},
[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
.symbol = "major-faults",
.alias = "",
},
[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
.symbol = "alignment-faults",
.alias = "",
},
[PERF_COUNT_SW_EMULATION_FAULTS] = {
.symbol = "emulation-faults",
.alias = "",
},
[PERF_COUNT_SW_DUMMY] = {
.symbol = "dummy",
.alias = "",
},
[PERF_COUNT_SW_BPF_OUTPUT] = {
.symbol = "bpf-output",
.alias = "",
},
[PERF_COUNT_SW_CGROUP_SWITCHES] = {
.symbol = "cgroup-switches",
.alias = "",
},
};
const char *event_type(int type)
{
switch (type) {
case PERF_TYPE_HARDWARE:
return "hardware";
case PERF_TYPE_SOFTWARE:
return "software";
case PERF_TYPE_TRACEPOINT:
return "tracepoint";
case PERF_TYPE_HW_CACHE:
return "hardware-cache";
default:
break;
}
return "unknown";
}
static char *get_config_str(struct list_head *head_terms, enum parse_events__term_type type_term)
{
struct parse_events_term *term;
if (!head_terms)
return NULL;
list_for_each_entry(term, head_terms, list)
if (term->type_term == type_term)
return term->val.str;
return NULL;
}
static char *get_config_metric_id(struct list_head *head_terms)
{
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}
static char *get_config_name(struct list_head *head_terms)
{
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}
/**
* fix_raw - For each raw term see if there is an event (aka alias) in pmu that
* matches the raw's string value. If the string value matches an
* event then change the term to be an event, if not then change it to
* be a config term. For example, "read" may be an event of the PMU or
* a raw hex encoding of 0xead. The fix-up is done late so the PMU of
* the event can be determined and we don't need to scan all PMUs
* ahead-of-time.
* @config_terms: the list of terms that may contain a raw term.
* @pmu: the PMU to scan for events from.
*/
static void fix_raw(struct list_head *config_terms, struct perf_pmu *pmu)
{
struct parse_events_term *term;
list_for_each_entry(term, config_terms, list) {
u64 num;
if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
continue;
if (perf_pmu__have_event(pmu, term->val.str)) {
zfree(&term->config);
term->config = term->val.str;
term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
term->val.num = 1;
term->no_value = true;
continue;
}
zfree(&term->config);
term->config = strdup("config");
errno = 0;
num = strtoull(term->val.str + 1, NULL, 16);
assert(errno == 0);
free(term->val.str);
term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
term->val.num = num;
term->no_value = false;
}
}
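/**
 * __add_event - allocate and set up an evsel for the given attribute.
 * @list: if non-NULL the new evsel is appended to it.
 * @idx: index for the evsel, incremented when allocation succeeds.
 * @attr: the perf_event_attr describing the event.
 * @init_attr: whether to apply event_attr_init() defaults to @attr.
 * @name: optional event name, duplicated into the evsel.
 * @metric_id: optional metric id, duplicated into the evsel.
 * @pmu: the event's PMU, providing its cpu map and core/uncore flags.
 * @config_terms: terms spliced onto the evsel's config_terms list.
 * @auto_merge_stats: whether stats may be merged with other events.
 * @cpu_list: cpu list string used for the cpu map when @pmu is NULL.
 *
 * Returns the new evsel, or NULL on allocation failure.
 */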
static struct evsel *
__add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
bool init_attr,
const char *name, const char *metric_id, struct perf_pmu *pmu,
struct list_head *config_terms, bool auto_merge_stats,
const char *cpu_list)
{
struct evsel *evsel;
struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
if (pmu)
perf_pmu__warn_invalid_formats(pmu);
if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) {
perf_pmu__warn_invalid_config(pmu, attr->config, name,
PERF_PMU_FORMAT_VALUE_CONFIG, "config");
perf_pmu__warn_invalid_config(pmu, attr->config1, name,
PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
perf_pmu__warn_invalid_config(pmu, attr->config2, name,
PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
perf_pmu__warn_invalid_config(pmu, attr->config3, name,
PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
}
if (init_attr)
event_attr_init(attr);
evsel = evsel__new_idx(attr, *idx);
if (!evsel) {
perf_cpu_map__put(cpus);
return NULL;
}
(*idx)++;
evsel->core.cpus = cpus;
evsel->core.own_cpus = perf_cpu_map__get(cpus);
evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
evsel->auto_merge_stats = auto_merge_stats;
evsel->pmu = pmu;
evsel->pmu_name = pmu ? strdup(pmu->name) : NULL;
if (name)
evsel->name = strdup(name);
if (metric_id)
evsel->metric_id = strdup(metric_id);
if (config_terms)
list_splice_init(config_terms, &evsel->config_terms);
if (list)
list_add_tail(&evsel->core.node, list);
return evsel;
}
struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
const char *name, const char *metric_id,
struct perf_pmu *pmu)
{
return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
metric_id, pmu, /*config_terms=*/NULL,
/*auto_merge_stats=*/false, /*cpu_list=*/NULL);
}
static int add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr, const char *name,
const char *metric_id, struct list_head *config_terms)
{
return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
/*pmu=*/NULL, config_terms,
/*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
}
static int add_event_tool(struct list_head *list, int *idx,
enum perf_tool_event tool_event)
{
struct evsel *evsel;
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
};
evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
/*metric_id=*/NULL, /*pmu=*/NULL,
/*config_terms=*/NULL, /*auto_merge_stats=*/false,
/*cpu_list=*/"0");
if (!evsel)
return -ENOMEM;
evsel->tool_event = tool_event;
if (tool_event == PERF_TOOL_DURATION_TIME
|| tool_event == PERF_TOOL_USER_TIME
|| tool_event == PERF_TOOL_SYSTEM_TIME) {
free((char *)evsel->unit);
evsel->unit = strdup("ns");
}
return 0;
}
/**
* parse_aliases - search names for entries beginning or equalling str ignoring
* case. If multiple entries in names match str then the longest
* is chosen.
* @str: The needle to look for.
* @names: The haystack to search.
* @size: The size of the haystack.
* @longest: Out argument giving the length of the matching entry.
*/
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
int *longest)
{
*longest = -1;
for (int i = 0; i < size; i++) {
for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
int n = strlen(names[i][j]);
if (n > *longest && !strncasecmp(str, names[i][j], n))
*longest = n;
}
if (*longest > 0)
return i;
}
return -1;
}
typedef int config_term_func_t(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
struct list_head *head,
struct parse_events_error *err,
config_term_func_t config_term);
/**
* parse_events__decode_legacy_cache - Search name for the legacy cache event
* name composed of 1, 2 or 3
* hyphen-separated sections. The first section is
* the cache type while the others are the
* optional op and optional result. To make
* life hard the names in the table also
* contain hyphens and the longest name
* should always be selected.
*/
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
int len, cache_type = -1, cache_op = -1, cache_result = -1;
const char *name_end = &name[strlen(name) + 1];
const char *str = name;
cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
if (cache_type == -1)
return -EINVAL;
str += len + 1;
if (str < name_end) {
cache_op = parse_aliases(str, evsel__hw_cache_op,
PERF_COUNT_HW_CACHE_OP_MAX, &len);
if (cache_op >= 0) {
if (!evsel__is_cache_op_valid(cache_type, cache_op))
return -EINVAL;
str += len + 1;
} else {
cache_result = parse_aliases(str, evsel__hw_cache_result,
PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
if (cache_result >= 0)
str += len + 1;
}
}
if (str < name_end) {
if (cache_op < 0) {
cache_op = parse_aliases(str, evsel__hw_cache_op,
PERF_COUNT_HW_CACHE_OP_MAX, &len);
if (cache_op >= 0) {
if (!evsel__is_cache_op_valid(cache_type, cache_op))
return -EINVAL;
}
} else if (cache_result < 0) {
cache_result = parse_aliases(str, evsel__hw_cache_result,
PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
}
}
/*
* Fall back to reads:
*/
if (cache_op == -1)
cache_op = PERF_COUNT_HW_CACHE_OP_READ;
/*
* Fall back to accesses:
*/
if (cache_result == -1)
cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
*config = cache_type | (cache_op << 8) | (cache_result << 16);
if (perf_pmus__supports_extended_type())
*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
return 0;
}
/**
* parse_events__filter_pmu - returns false if a wildcard PMU should be
* considered, true if it should be filtered.
*/
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
const struct perf_pmu *pmu)
{
if (parse_state->pmu_filter == NULL)
return false;
return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}
int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_state *parse_state,
struct list_head *head_config)
{
struct perf_pmu *pmu = NULL;
bool found_supported = false;
const char *config_name = get_config_name(head_config);
const char *metric_id = get_config_metric_id(head_config);
/* Legacy cache events are only supported by core PMUs. */
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
LIST_HEAD(config_terms);
struct perf_event_attr attr;
int ret;
if (parse_events__filter_pmu(parse_state, pmu))
continue;
memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_HW_CACHE;
ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
if (ret)
return ret;
found_supported = true;
if (head_config) {
if (config_attr(&attr, head_config, parse_state->error, config_term_common))
return -EINVAL;
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
}
if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
/*cpu_list=*/NULL) == NULL)
return -ENOMEM;
free_config_terms(&config_terms);
}
return found_supported ? 0 : -EINVAL;
}
#ifdef HAVE_LIBTRACEEVENT
static void tracepoint_error(struct parse_events_error *e, int err,
const char *sys, const char *name, int column)
{
const char *str;
char help[BUFSIZ];
if (!e)
return;
/*
* We get the error either directly from the syscall's errno (> 0)
* or from an encoded pointer's error (< 0).
*/
err = abs(err);
switch (err) {
case EACCES:
str = "can't access trace events";
break;
case ENOENT:
str = "unknown tracepoint";
break;
default:
str = "failed to add tracepoint";
break;
}
tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
parse_events_error__handle(e, column, strdup(str), strdup(help));
}
static int add_tracepoint(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
struct list_head *head_config, void *loc_)
{
YYLTYPE *loc = loc_;
struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);
if (IS_ERR(evsel)) {
tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
return PTR_ERR(evsel);
}
if (head_config) {
LIST_HEAD(config_terms);
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
list_splice(&config_terms, &evsel->config_terms);
}
list_add_tail(&evsel->core.node, list);
return 0;
}
static int add_tracepoint_multi_event(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
struct list_head *head_config, YYLTYPE *loc)
{
char *evt_path;
struct dirent *evt_ent;
DIR *evt_dir;
int ret = 0, found = 0;
evt_path = get_events_file(sys_name);
if (!evt_path) {
tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
return -1;
}
evt_dir = opendir(evt_path);
if (!evt_dir) {
put_events_file(evt_path);
tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
return -1;
}
while (!ret && (evt_ent = readdir(evt_dir))) {
if (!strcmp(evt_ent->d_name, ".")
|| !strcmp(evt_ent->d_name, "..")
|| !strcmp(evt_ent->d_name, "enable")
|| !strcmp(evt_ent->d_name, "filter"))
continue;
if (!strglobmatch(evt_ent->d_name, evt_name))
continue;
found++;
ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
err, head_config, loc);
}
if (!found) {
tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
ret = -1;
}
put_events_file(evt_path);
closedir(evt_dir);
return ret;
}
static int add_tracepoint_event(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
struct list_head *head_config, YYLTYPE *loc)
{
return strpbrk(evt_name, "*?") ?
add_tracepoint_multi_event(list, idx, sys_name, evt_name,
err, head_config, loc) :
add_tracepoint(list, idx, sys_name, evt_name,
err, head_config, loc);
}
static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
struct list_head *head_config, YYLTYPE *loc)
{
struct dirent *events_ent;
DIR *events_dir;
int ret = 0;
events_dir = tracing_events__opendir();
if (!events_dir) {
tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
return -1;
}
while (!ret && (events_ent = readdir(events_dir))) {
if (!strcmp(events_ent->d_name, ".")
|| !strcmp(events_ent->d_name, "..")
|| !strcmp(events_ent->d_name, "enable")
|| !strcmp(events_ent->d_name, "header_event")
|| !strcmp(events_ent->d_name, "header_page"))
continue;
if (!strglobmatch(events_ent->d_name, sys_name))
continue;
ret = add_tracepoint_event(list, idx, events_ent->d_name,
evt_name, err, head_config, loc);
}
closedir(events_dir);
return ret;
}
#endif /* HAVE_LIBTRACEEVENT */
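/*
 * Parse a breakpoint type string made up of at most one each of the
 * characters 'r' (read), 'w' (write) and 'x' (execute). An empty or
 * NULL type defaults to read|write.
 */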
static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
int i;
for (i = 0; i < 3; i++) {
if (!type || !type[i])
break;
#define CHECK_SET_TYPE(bit) \
do { \
if (attr->bp_type & bit) \
return -EINVAL; \
else \
attr->bp_type |= bit; \
} while (0)
switch (type[i]) {
case 'r':
CHECK_SET_TYPE(HW_BREAKPOINT_R);
break;
case 'w':
CHECK_SET_TYPE(HW_BREAKPOINT_W);
break;
case 'x':
CHECK_SET_TYPE(HW_BREAKPOINT_X);
break;
default:
return -EINVAL;
}
}
#undef CHECK_SET_TYPE
if (!attr->bp_type) /* Default */
attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
return 0;
}
int parse_events_add_breakpoint(struct parse_events_state *parse_state,
struct list_head *list,
u64 addr, char *type, u64 len,
struct list_head *head_config __maybe_unused)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
const char *name;
memset(&attr, 0, sizeof(attr));
attr.bp_addr = addr;
if (parse_breakpoint_type(type, &attr))
return -EINVAL;
/* Provide some defaults if len is not specified */
if (!len) {
if (attr.bp_type == HW_BREAKPOINT_X)
len = sizeof(long);
else
len = HW_BREAKPOINT_LEN_4;
}
attr.bp_len = len;
attr.type = PERF_TYPE_BREAKPOINT;
attr.sample_period = 1;
if (head_config) {
if (config_attr(&attr, head_config, parse_state->error,
config_term_common))
return -EINVAL;
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
}
name = get_config_name(head_config);
return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
&config_terms);
}
static int check_type_val(struct parse_events_term *term,
struct parse_events_error *err,
enum parse_events__term_val_type type)
{
if (type == term->type_val)
return 0;
if (err) {
parse_events_error__handle(err, term->err_val,
type == PARSE_EVENTS__TERM_TYPE_NUM
? strdup("expected numeric value")
: strdup("expected string value"),
NULL);
}
return -EINVAL;
}
static bool config_term_shrinked;
static const char *config_term_name(enum parse_events__term_type term_type)
{
/*
* Update according to parse-events.l
*/
static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
[PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
[PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
[PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
[PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
[PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
[PARSE_EVENTS__TERM_TYPE_NAME] = "name",
[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
[PARSE_EVENTS__TERM_TYPE_TIME] = "time",
[PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
[PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
[PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
[PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
[PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
[PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
[PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
[PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
[PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
[PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
[PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
};
if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
return "unknown term";
return config_term_names[term_type];
}
static bool
config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
char *err_str;
if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
parse_events_error__handle(err, -1,
strdup("Invalid term_type"), NULL);
return false;
}
if (!config_term_shrinked)
return true;
switch (term_type) {
case PARSE_EVENTS__TERM_TYPE_CONFIG:
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
case PARSE_EVENTS__TERM_TYPE_CONFIG3:
case PARSE_EVENTS__TERM_TYPE_NAME:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
return true;
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
case PARSE_EVENTS__TERM_TYPE_TIME:
case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
case PARSE_EVENTS__TERM_TYPE_INHERIT:
case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
default:
if (!err)
return false;
/* term_type is validated so indexing is safe */
if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
config_term_name(term_type)) >= 0)
parse_events_error__handle(err, -1, err_str, NULL);
return false;
}
}
void parse_events__shrink_config_terms(void)
{
config_term_shrinked = true;
}
static int config_term_common(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type) \
do { \
if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
return -EINVAL; \
} while (0)
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_CONFIG:
CHECK_TYPE_VAL(NUM);
attr->config = term->val.num;
break;
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
CHECK_TYPE_VAL(NUM);
attr->config1 = term->val.num;
break;
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
CHECK_TYPE_VAL(NUM);
attr->config2 = term->val.num;
break;
case PARSE_EVENTS__TERM_TYPE_CONFIG3:
CHECK_TYPE_VAL(NUM);
attr->config3 = term->val.num;
break;
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
CHECK_TYPE_VAL(STR);
if (strcmp(term->val.str, "no") &&
parse_branch_str(term->val.str,
&attr->branch_sample_type)) {
parse_events_error__handle(err, term->err_val,
strdup("invalid branch sample type"),
NULL);
return -EINVAL;
}
break;
case PARSE_EVENTS__TERM_TYPE_TIME:
CHECK_TYPE_VAL(NUM);
if (term->val.num > 1) {
parse_events_error__handle(err, term->err_val,
strdup("expected 0 or 1"),
NULL);
return -EINVAL;
}
break;
case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
CHECK_TYPE_VAL(STR);
break;
case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_INHERIT:
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_NAME:
CHECK_TYPE_VAL(STR);
break;
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
CHECK_TYPE_VAL(STR);
break;
case PARSE_EVENTS__TERM_TYPE_RAW:
CHECK_TYPE_VAL(STR);
break;
case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_PERCORE:
CHECK_TYPE_VAL(NUM);
if ((unsigned int)term->val.num > 1) {
parse_events_error__handle(err, term->err_val,
strdup("expected 0 or 1"),
NULL);
return -EINVAL;
}
break;
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
CHECK_TYPE_VAL(NUM);
if (term->val.num > UINT_MAX) {
parse_events_error__handle(err, term->err_val,
strdup("too big"),
NULL);
return -EINVAL;
}
break;
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
default:
parse_events_error__handle(err, term->err_term,
strdup(config_term_name(term->type_term)),
parse_events_formats_error_string(NULL));
return -EINVAL;
}
/*
* Check term availability after basic checking so
* PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
*
* If we checked availability at the entry of this function,
* the user would see "'<sysfs term>' is not usable in 'perf stat'"
* if an invalid config term is provided for legacy events
* (for example, instructions/badterm/...), which is confusing.
*/
if (!config_term_avail(term->type_term, err))
return -EINVAL;
return 0;
#undef CHECK_TYPE_VAL
}
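/*
 * Handle a term for an event on a named PMU: legacy cache and hardware
 * terms are rewritten against the PMU's type, sysfs and driver-config
 * terms are accepted as-is, and everything else is handled by
 * config_term_common().
 */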
static int config_term_pmu(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err)
{
if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
if (!pmu) {
char *err_str;
if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
parse_events_error__handle(err, term->err_term,
err_str, /*help=*/NULL);
return -EINVAL;
}
if (perf_pmu__supports_legacy_cache(pmu)) {
attr->type = PERF_TYPE_HW_CACHE;
return parse_events__decode_legacy_cache(term->config, pmu->type,
&attr->config);
} else
term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
}
if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
if (!pmu) {
char *err_str;
if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
parse_events_error__handle(err, term->err_term,
err_str, /*help=*/NULL);
return -EINVAL;
}
attr->type = PERF_TYPE_HARDWARE;
attr->config = term->val.num;
if (perf_pmus__supports_extended_type())
attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
return 0;
}
if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
/*
* Always succeed for sysfs terms, as we don't know
* at this point what type they need to have.
*/
return 0;
}
return config_term_common(attr, term, err);
}
#ifdef HAVE_LIBTRACEEVENT
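/* Only a subset of the common terms is accepted for tracepoint events. */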
static int config_term_tracepoint(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err)
{
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
case PARSE_EVENTS__TERM_TYPE_INHERIT:
case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
return config_term_common(attr, term, err);
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_CONFIG:
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
case PARSE_EVENTS__TERM_TYPE_CONFIG3:
case PARSE_EVENTS__TERM_TYPE_NAME:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
case PARSE_EVENTS__TERM_TYPE_TIME:
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
default:
if (err) {
parse_events_error__handle(err, term->err_term,
strdup(config_term_name(term->type_term)),
strdup("valid terms: call-graph,stack-size\n"));
}
return -EINVAL;
}
return 0;
}
#endif
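/* Apply @config_term to every term on @head, updating @attr; fails on the first bad term. */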
static int config_attr(struct perf_event_attr *attr,
struct list_head *head,
struct parse_events_error *err,
config_term_func_t config_term)
{
struct parse_events_term *term;
list_for_each_entry(term, head, list)
if (config_term(attr, term, err))
return -EINVAL;
return 0;
}
static int get_config_terms(struct list_head *head_config,
struct list_head *head_terms __maybe_unused)
{
#define ADD_CONFIG_TERM(__type, __weak) \
struct evsel_config_term *__t; \
\
__t = zalloc(sizeof(*__t)); \
if (!__t) \
return -ENOMEM; \
\
INIT_LIST_HEAD(&__t->list); \
__t->type = EVSEL__CONFIG_TERM_ ## __type; \
__t->weak = __weak; \
list_add_tail(&__t->list, head_terms)
#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \
do { \
ADD_CONFIG_TERM(__type, __weak); \
__t->val.__name = __val; \
} while (0)
#define ADD_CONFIG_TERM_STR(__type, __val, __weak) \
do { \
ADD_CONFIG_TERM(__type, __weak); \
__t->val.str = strdup(__val); \
if (!__t->val.str) { \
zfree(&__t); \
return -ENOMEM; \
} \
__t->free_str = true; \
} while (0)
struct parse_events_term *term;
list_for_each_entry(term, head_config, list) {
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_TIME:
ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
term->val.num, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_INHERIT:
ADD_CONFIG_TERM_VAL(INHERIT, inherit,
term->val.num ? 1 : 0, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
ADD_CONFIG_TERM_VAL(INHERIT, inherit,
term->val.num ? 0 : 1, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
term->val.num, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
term->val.num, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
term->val.num ? 1 : 0, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
term->val.num ? 0 : 1, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_PERCORE:
ADD_CONFIG_TERM_VAL(PERCORE, percore,
term->val.num ? true : false, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
term->val.num ? 1 : 0, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
term->val.num, term->weak);
break;
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_CONFIG:
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
case PARSE_EVENTS__TERM_TYPE_CONFIG3:
case PARSE_EVENTS__TERM_TYPE_NAME:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
default:
break;
}
}
return 0;
}
/*
* Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
* each bit of attr->config that the user has changed.
*/
static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
struct list_head *head_terms)
{
struct parse_events_term *term;
u64 bits = 0;
int type;
list_for_each_entry(term, head_config, list) {
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_USER:
type = perf_pmu__format_type(pmu, term->config);
if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
continue;
bits |= perf_pmu__format_bits(pmu, term->config);
break;
case PARSE_EVENTS__TERM_TYPE_CONFIG:
bits = ~(u64)0;
break;
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
case PARSE_EVENTS__TERM_TYPE_CONFIG3:
case PARSE_EVENTS__TERM_TYPE_NAME:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
case PARSE_EVENTS__TERM_TYPE_TIME:
case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
case PARSE_EVENTS__TERM_TYPE_INHERIT:
case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
default:
break;
}
}
if (bits)
ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);
#undef ADD_CONFIG_TERM
return 0;
}
int parse_events_add_tracepoint(struct list_head *list, int *idx,
const char *sys, const char *event,
struct parse_events_error *err,
struct list_head *head_config, void *loc_)
{
YYLTYPE *loc = loc_;
#ifdef HAVE_LIBTRACEEVENT
if (head_config) {
struct perf_event_attr attr;
if (config_attr(&attr, head_config, err,
config_term_tracepoint))
return -EINVAL;
}
if (strpbrk(sys, "*?"))
return add_tracepoint_multi_sys(list, idx, sys, event,
err, head_config, loc);
else
return add_tracepoint_event(list, idx, sys, event,
err, head_config, loc);
#else
(void)list;
(void)idx;
(void)sys;
(void)event;
(void)head_config;
parse_events_error__handle(err, loc->first_column, strdup("unsupported tracepoint"),
strdup("libtraceevent is necessary for tracepoint support"));
return -1;
#endif
}
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_pmu *pmu, u32 type, u32 extended_type,
u64 config, struct list_head *head_config)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
const char *name, *metric_id;
int ret;
memset(&attr, 0, sizeof(attr));
attr.type = type;
attr.config = config;
if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
assert(perf_pmus__supports_extended_type());
attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
}
if (head_config) {
if (config_attr(&attr, head_config, parse_state->error,
config_term_common))
return -EINVAL;
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
}
name = get_config_name(head_config);
metric_id = get_config_metric_id(head_config);
ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
/*cpu_list=*/NULL) ? 0 : -ENOMEM;
free_config_terms(&config_terms);
return ret;
}
int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
struct list_head *head_config,
bool wildcard)
{
struct perf_pmu *pmu = NULL;
bool found_supported = false;
/* Wildcards on numeric values are only supported by core PMUs. */
if (wildcard && perf_pmus__supports_extended_type()) {
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
int ret;
found_supported = true;
if (parse_events__filter_pmu(parse_state, pmu))
continue;
ret = __parse_events_add_numeric(parse_state, list, pmu,
type, pmu->type,
config, head_config);
if (ret)
return ret;
}
if (found_supported)
return 0;
}
return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
type, /*extended_type=*/0, config, head_config);
}
int parse_events_add_tool(struct parse_events_state *parse_state,
struct list_head *list,
int tool_event)
{
return add_event_tool(list, &parse_state->idx, tool_event);
}
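/* Return the value of the first percore term on @config_terms, or false if there is none. */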
static bool config_term_percore(struct list_head *config_terms)
{
struct evsel_config_term *term;
list_for_each_entry(term, config_terms, list) {
if (term->type == EVSEL__CONFIG_TERM_PERCORE)
return term->val.percore;
}
return false;
}
int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, const char *name,
struct list_head *head_config,
bool auto_merge_stats, void *loc_)
{
struct perf_event_attr attr;
struct perf_pmu_info info;
struct perf_pmu *pmu;
struct evsel *evsel;
struct parse_events_error *err = parse_state->error;
YYLTYPE *loc = loc_;
LIST_HEAD(config_terms);
pmu = parse_state->fake_pmu ?: perf_pmus__find(name);
if (!pmu) {
char *err_str;
if (asprintf(&err_str,
"Cannot find PMU `%s'. Missing kernel support?",
name) >= 0)
parse_events_error__handle(err, loc->first_column, err_str, NULL);
return -EINVAL;
}
if (verbose > 1) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
if (pmu->selectable && !head_config) {
strbuf_addf(&sb, "%s//", name);
} else {
strbuf_addf(&sb, "%s/", name);
parse_events_term__to_strbuf(head_config, &sb);
strbuf_addch(&sb, '/');
}
fprintf(stderr, "Attempt to add: %s\n", sb.buf);
strbuf_release(&sb);
}
if (head_config)
fix_raw(head_config, pmu);
if (pmu->default_config) {
memcpy(&attr, pmu->default_config,
sizeof(struct perf_event_attr));
} else {
memset(&attr, 0, sizeof(attr));
}
attr.type = pmu->type;
if (!head_config) {
evsel = __add_event(list, &parse_state->idx, &attr,
/*init_attr=*/true, /*name=*/NULL,
/*metric_id=*/NULL, pmu,
/*config_terms=*/NULL, auto_merge_stats,
/*cpu_list=*/NULL);
return evsel ? 0 : -ENOMEM;
}
if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info, err))
return -EINVAL;
if (verbose > 1) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
parse_events_term__to_strbuf(head_config, &sb);
fprintf(stderr, "..after resolving event: %s/%s/\n", name, sb.buf);
strbuf_release(&sb);
}
/*
* Configure the hardcoded terms first; any error is reported
* through parse_state->error.
*/
if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
return -EINVAL;
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
/*
* When using default config, record which bits of attr->config were
* changed by the user.
*/
if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
return -ENOMEM;
if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
free_config_terms(&config_terms);
return -EINVAL;
}
evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
get_config_name(head_config),
get_config_metric_id(head_config), pmu,
&config_terms, auto_merge_stats, /*cpu_list=*/NULL);
if (!evsel)
return -ENOMEM;
if (evsel->name)
evsel->use_config_name = true;
evsel->percore = config_term_percore(&evsel->config_terms);
if (parse_state->fake_pmu)
return 0;
free((char *)evsel->unit);
evsel->unit = strdup(info.unit);
evsel->scale = info.scale;
evsel->per_pkg = info.per_pkg;
evsel->snapshot = info.snapshot;
return 0;
}
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
char *str, struct list_head *head,
struct list_head **listp, void *loc_)
{
struct parse_events_term *term;
struct list_head *list = NULL;
struct list_head *orig_head = NULL;
struct perf_pmu *pmu = NULL;
YYLTYPE *loc = loc_;
int ok = 0;
const char *config;
*listp = NULL;
if (!head) {
head = malloc(sizeof(struct list_head));
if (!head)
goto out_err;
INIT_LIST_HEAD(head);
}
config = strdup(str);
if (!config)
goto out_err;
if (parse_events_term__num(&term,
PARSE_EVENTS__TERM_TYPE_USER,
config, /*num=*/1, /*novalue=*/true,
loc, /*loc_val=*/NULL) < 0) {
zfree(&config);
goto out_err;
}
list_add_tail(&term->list, head);
/* Add it for all PMUs that support the alias */
list = malloc(sizeof(struct list_head));
if (!list)
goto out_err;
INIT_LIST_HEAD(list);
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
bool auto_merge_stats;
if (parse_events__filter_pmu(parse_state, pmu))
continue;
if (!perf_pmu__have_event(pmu, str))
continue;
auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
parse_events_copy_term_list(head, &orig_head);
if (!parse_events_add_pmu(parse_state, list, pmu->name,
orig_head, auto_merge_stats, loc)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
parse_events_term__to_strbuf(orig_head, &sb);
pr_debug("%s -> %s/%s/\n", str, pmu->name, sb.buf);
strbuf_release(&sb);
ok++;
}
parse_events_terms__delete(orig_head);
}
if (parse_state->fake_pmu) {
if (!parse_events_add_pmu(parse_state, list, str, head,
/*auto_merge_stats=*/true, loc)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
parse_events_term__to_strbuf(head, &sb);
pr_debug("%s -> %s/%s/\n", str, "fake_pmu", sb.buf);
strbuf_release(&sb);
ok++;
}
}
out_err:
if (ok)
*listp = list;
else
free(list);
parse_events_terms__delete(head);
return ok ? 0 : -1;
}
int parse_events__modifier_group(struct list_head *list,
char *event_mod)
{
return parse_events__modifier_event(list, event_mod, true);
}
void parse_events__set_leader(char *name, struct list_head *list)
{
struct evsel *leader;
if (list_empty(list)) {
WARN_ONCE(true, "WARNING: failed to set leader: empty list");
return;
}
leader = list_first_entry(list, struct evsel, core.node);
__perf_evlist__set_leader(list, &leader->core);
leader->group_name = name;
}
/* list_event is assumed to point to malloc'ed memory */
void parse_events_update_lists(struct list_head *list_event,
struct list_head *list_all)
{
/*
* Called for single event definition. Update the
* 'all event' list, and reinit the 'single event'
* list, for next event definition.
*/
list_splice_tail(list_event, list_all);
free(list_event);
}
struct event_modifier {
int eu;
int ek;
int eh;
int eH;
int eG;
int eI;
int precise;
int precise_max;
int exclude_GH;
int sample_read;
int pinned;
int weak;
int exclusive;
int bpf_counter;
};
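/*
 * Translate a modifier string into an event_modifier. Recognized
 * characters: u (user), k (kernel), h (hypervisor), G (guest), H (host),
 * I (exclude idle), p (precise, may be repeated), P (max precise),
 * S (sample read), D (pinned), e (exclusive), W (weak group) and
 * b (bpf counter).
 */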
static int get_event_modifier(struct event_modifier *mod, char *str,
struct evsel *evsel)
{
int eu = evsel ? evsel->core.attr.exclude_user : 0;
int ek = evsel ? evsel->core.attr.exclude_kernel : 0;
int eh = evsel ? evsel->core.attr.exclude_hv : 0;
int eH = evsel ? evsel->core.attr.exclude_host : 0;
int eG = evsel ? evsel->core.attr.exclude_guest : 0;
int eI = evsel ? evsel->core.attr.exclude_idle : 0;
int precise = evsel ? evsel->core.attr.precise_ip : 0;
int precise_max = 0;
int sample_read = 0;
int pinned = evsel ? evsel->core.attr.pinned : 0;
int exclusive = evsel ? evsel->core.attr.exclusive : 0;
int exclude = eu | ek | eh;
int exclude_GH = evsel ? evsel->exclude_GH : 0;
int weak = 0;
int bpf_counter = 0;
memset(mod, 0, sizeof(*mod));
while (*str) {
if (*str == 'u') {
if (!exclude)
exclude = eu = ek = eh = 1;
if (!exclude_GH && !perf_guest)
eG = 1;
eu = 0;
} else if (*str == 'k') {
if (!exclude)
exclude = eu = ek = eh = 1;
ek = 0;
} else if (*str == 'h') {
if (!exclude)
exclude = eu = ek = eh = 1;
eh = 0;
} else if (*str == 'G') {
if (!exclude_GH)
exclude_GH = eG = eH = 1;
eG = 0;
} else if (*str == 'H') {
if (!exclude_GH)
exclude_GH = eG = eH = 1;
eH = 0;
} else if (*str == 'I') {
eI = 1;
} else if (*str == 'p') {
precise++;
/* use of precise requires exclude_guest */
if (!exclude_GH)
eG = 1;
} else if (*str == 'P') {
precise_max = 1;
} else if (*str == 'S') {
sample_read = 1;
} else if (*str == 'D') {
pinned = 1;
} else if (*str == 'e') {
exclusive = 1;
} else if (*str == 'W') {
weak = 1;
} else if (*str == 'b') {
bpf_counter = 1;
} else
break;
++str;
}
/*
* precise ip:
*
* 0 - SAMPLE_IP can have arbitrary skid
* 1 - SAMPLE_IP must have constant skid
* 2 - SAMPLE_IP requested to have 0 skid
* 3 - SAMPLE_IP must have 0 skid
*
* See also PERF_RECORD_MISC_EXACT_IP
*/
if (precise > 3)
return -EINVAL;
mod->eu = eu;
mod->ek = ek;
mod->eh = eh;
mod->eH = eH;
mod->eG = eG;
mod->eI = eI;
mod->precise = precise;
mod->precise_max = precise_max;
mod->exclude_GH = exclude_GH;
mod->sample_read = sample_read;
mod->pinned = pinned;
mod->weak = weak;
mod->bpf_counter = bpf_counter;
mod->exclusive = exclusive;
return 0;
}
/*
* Basic modifier sanity check to validate that it contains at most
* one instance of each modifier (apart from 'p').
*/
static int check_modifier(char *str)
{
char *p = str;
/* The sizeof includes 0 byte as well. */
if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
return -1;
while (*p) {
if (*p != 'p' && strchr(p + 1, *p))
return -1;
p++;
}
return 0;
}
int parse_events__modifier_event(struct list_head *list, char *str, bool add)
{
struct evsel *evsel;
struct event_modifier mod;
if (str == NULL)
return 0;
if (check_modifier(str))
return -EINVAL;
if (!add && get_event_modifier(&mod, str, NULL))
return -EINVAL;
__evlist__for_each_entry(list, evsel) {
if (add && get_event_modifier(&mod, str, evsel))
return -EINVAL;
evsel->core.attr.exclude_user = mod.eu;
evsel->core.attr.exclude_kernel = mod.ek;
evsel->core.attr.exclude_hv = mod.eh;
evsel->core.attr.precise_ip = mod.precise;
evsel->core.attr.exclude_host = mod.eH;
evsel->core.attr.exclude_guest = mod.eG;
evsel->core.attr.exclude_idle = mod.eI;
evsel->exclude_GH = mod.exclude_GH;
evsel->sample_read = mod.sample_read;
evsel->precise_max = mod.precise_max;
evsel->weak_group = mod.weak;
evsel->bpf_counter = mod.bpf_counter;
if (evsel__is_group_leader(evsel)) {
evsel->core.attr.pinned = mod.pinned;
evsel->core.attr.exclusive = mod.exclusive;
}
}
return 0;
}
int parse_events_name(struct list_head *list, const char *name)
{
struct evsel *evsel;
__evlist__for_each_entry(list, evsel) {
if (!evsel->name) {
evsel->name = strdup(name);
if (!evsel->name)
return -ENOMEM;
}
}
return 0;
}
static int parse_events__scanner(const char *str,
FILE *input,
struct parse_events_state *parse_state)
{
YY_BUFFER_STATE buffer;
void *scanner;
int ret;
ret = parse_events_lex_init_extra(parse_state, &scanner);
if (ret)
return ret;
if (str)
buffer = parse_events__scan_string(str, scanner);
else
parse_events_set_in(input, scanner);
#ifdef PARSER_DEBUG
parse_events_debug = 1;
parse_events_set_debug(1, scanner);
#endif
ret = parse_events_parse(parse_state, scanner);
if (str) {
parse_events__flush_buffer(buffer, scanner);
parse_events__delete_buffer(buffer, scanner);
}
parse_events_lex_destroy(scanner);
return ret;
}
/*
* parse event config string, return a list of event terms.
*/
int parse_events_terms(struct list_head *terms, const char *str, FILE *input)
{
struct parse_events_state parse_state = {
.terms = NULL,
.stoken = PE_START_TERMS,
};
int ret;
ret = parse_events__scanner(str, input, &parse_state);
if (!ret) {
list_splice(parse_state.terms, terms);
zfree(&parse_state.terms);
return 0;
}
parse_events_terms__delete(parse_state.terms);
return ret;
}
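/*
 * Compute and assign evsel->group_pmu_name, the PMU name used when
 * sorting and regrouping events. Software and aux-leader groups borrow
 * the name of the first non-software member so they aren't split out of
 * their group.
 */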
static int evsel__compute_group_pmu_name(struct evsel *evsel,
const struct list_head *head)
{
struct evsel *leader = evsel__leader(evsel);
struct evsel *pos;
const char *group_pmu_name;
struct perf_pmu *pmu = evsel__find_pmu(evsel);
if (!pmu) {
/*
* For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
* is a core PMU, but in heterogeneous systems this is
* unknown. For now pick the first core PMU.
*/
pmu = perf_pmus__scan_core(NULL);
}
if (!pmu) {
pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
return -EINVAL;
}
group_pmu_name = pmu->name;
/*
* Software events may be in a group with other uncore PMU events. Use
* the pmu_name of the first non-software event to avoid breaking the
* software event out of the group.
*
* Aux event leaders, like intel_pt, expect a group with events from
* other PMUs, so substitute the AUX event's PMU in this case.
*/
if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
struct perf_pmu *leader_pmu = evsel__find_pmu(leader);
if (!leader_pmu) {
/* As with determining pmu above. */
leader_pmu = perf_pmus__scan_core(NULL);
}
/*
* Starting with the leader, find the first event with a named
* non-software PMU. for_each_group_(member|evsel) isn't used
* because the list isn't yet sorted, so evsels in the same group
* aren't adjacent.
*/
if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
group_pmu_name = leader_pmu->name;
} else if (leader->core.nr_members > 1) {
list_for_each_entry(pos, head, core.node) {
struct perf_pmu *pos_pmu;
if (pos == leader || evsel__leader(pos) != leader)
continue;
pos_pmu = evsel__find_pmu(pos);
if (!pos_pmu) {
/* As with determining pmu above. */
pos_pmu = perf_pmus__scan_core(NULL);
}
if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
group_pmu_name = pos_pmu->name;
break;
}
}
}
}
/* Assign the actual name taking care that the fake PMU lacks a name. */
evsel->group_pmu_name = strdup(group_pmu_name ?: "fake");
return evsel->group_pmu_name ? 0 : -ENOMEM;
}
__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
/* Order by insertion index. */
return lhs->core.idx - rhs->core.idx;
}
static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
int *force_grouped_idx = _fg_idx;
int lhs_sort_idx, rhs_sort_idx, ret;
const char *lhs_pmu_name, *rhs_pmu_name;
bool lhs_has_group, rhs_has_group;
/*
* First sort by grouping/leader. Read the leader idx only if the evsel
* is part of a group; by default ungrouped events are sorted
* relative to grouped events based on where the first ungrouped event
* occurs. If neither event has a group we fall through to the
* arch-specific sorting, which can reorder and fix things like
* Intel's topdown events.
*/
if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
lhs_has_group = true;
lhs_sort_idx = lhs_core->leader->idx;
} else {
lhs_has_group = false;
lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
? *force_grouped_idx
: lhs_core->idx;
}
if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
rhs_has_group = true;
rhs_sort_idx = rhs_core->leader->idx;
} else {
rhs_has_group = false;
rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
? *force_grouped_idx
: rhs_core->idx;
}
if (lhs_sort_idx != rhs_sort_idx)
return lhs_sort_idx - rhs_sort_idx;
/* Group by PMU if there is a group. Groups can't span PMUs. */
if (lhs_has_group && rhs_has_group) {
lhs_pmu_name = lhs->group_pmu_name;
rhs_pmu_name = rhs->group_pmu_name;
ret = strcmp(lhs_pmu_name, rhs_pmu_name);
if (ret)
return ret;
}
/* Architecture specific sorting. */
return arch_evlist__cmp(lhs, rhs);
}
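/*
 * Sort the event list with evlist__cmp() and rebuild the group leaders
 * so that groups never span PMUs. Returns a negative error code, 1 if
 * the index order or the number of leaders changed, and 0 otherwise.
 */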
static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
int idx = 0, force_grouped_idx = -1;
struct evsel *pos, *cur_leader = NULL;
struct perf_evsel *cur_leaders_grp = NULL;
bool idx_changed = false, cur_leader_force_grouped = false;
int orig_num_leaders = 0, num_leaders = 0;
int ret;
/*
* Compute index to insert ungrouped events at. Place them where the
* first ungrouped event appears.
*/
list_for_each_entry(pos, list, core.node) {
const struct evsel *pos_leader = evsel__leader(pos);
ret = evsel__compute_group_pmu_name(pos, list);
if (ret)
return ret;
if (pos == pos_leader)
orig_num_leaders++;
/*
* Ensure indexes are sequential, in particular for multiple
* event lists being merged. The indexes are used to detect when
* the user order is modified.
*/
pos->core.idx = idx++;
/* Remember an index to sort all forced grouped events together to. */
if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
arch_evsel__must_be_in_group(pos))
force_grouped_idx = pos->core.idx;
}
/* Sort events. */
list_sort(&force_grouped_idx, list, evlist__cmp);
/*
* Recompute groups, splitting for PMUs and adding groups for events
* that require them.
*/
idx = 0;
list_for_each_entry(pos, list, core.node) {
const struct evsel *pos_leader = evsel__leader(pos);
const char *pos_pmu_name = pos->group_pmu_name;
const char *cur_leader_pmu_name;
bool pos_force_grouped = force_grouped_idx != -1 &&
arch_evsel__must_be_in_group(pos);
/* Reset index and nr_members. */
if (pos->core.idx != idx)
idx_changed = true;
pos->core.idx = idx++;
pos->core.nr_members = 0;
/*
* Set the group leader respecting the given groupings and that
* groups can't span PMUs.
*/
if (!cur_leader)
cur_leader = pos;
cur_leader_pmu_name = cur_leader->group_pmu_name;
if ((cur_leaders_grp != pos->core.leader &&
(!pos_force_grouped || !cur_leader_force_grouped)) ||
strcmp(cur_leader_pmu_name, pos_pmu_name)) {
/* Event is for a different group/PMU than last. */
cur_leader = pos;
/*
* Remember the leader's group before it is overwritten,
* so that later events match as being in the same
* group.
*/
cur_leaders_grp = pos->core.leader;
/*
* Avoid forcing events into groups with events that
* don't need to be in the group.
*/
cur_leader_force_grouped = pos_force_grouped;
}
if (pos_leader != cur_leader) {
/* The leader changed so update it. */
evsel__set_leader(pos, cur_leader);
}
}
list_for_each_entry(pos, list, core.node) {
struct evsel *pos_leader = evsel__leader(pos);
if (pos == pos_leader)
num_leaders++;
pos_leader->core.nr_members++;
}
return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}
int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
struct parse_events_error *err, struct perf_pmu *fake_pmu,
bool warn_if_reordered)
{
struct parse_events_state parse_state = {
.list = LIST_HEAD_INIT(parse_state.list),
.idx = evlist->core.nr_entries,
.error = err,
.stoken = PE_START_EVENTS,
.fake_pmu = fake_pmu,
.pmu_filter = pmu_filter,
.match_legacy_cache_terms = true,
};
int ret, ret2;
ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);
if (!ret && list_empty(&parse_state.list)) {
WARN_ONCE(true, "WARNING: event parser found nothing\n");
return -1;
}
ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
if (ret2 < 0)
return ret;
if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus)
pr_warning("WARNING: events were regrouped to match PMUs\n");
/*
* Add list to the evlist even with errors to allow callers to clean up.
*/
evlist__splice_list_tail(evlist, &parse_state.list);
if (!ret) {
struct evsel *last;
last = evlist__last(evlist);
last->cmdline_group_boundary = true;
return 0;
}
/*
* There are 2 users - builtin-record and builtin-test objects.
* Both call evlist__delete in case of error, so we don't
* need to bother.
*/
return ret;
}
int parse_event(struct evlist *evlist, const char *str)
{
struct parse_events_error err;
int ret;
parse_events_error__init(&err);
ret = parse_events(evlist, str, &err);
parse_events_error__exit(&err);
return ret;
}
void parse_events_error__init(struct parse_events_error *err)
{
bzero(err, sizeof(*err));
}
void parse_events_error__exit(struct parse_events_error *err)
{
zfree(&err->str);
zfree(&err->help);
zfree(&err->first_str);
zfree(&err->first_help);
}
void parse_events_error__handle(struct parse_events_error *err, int idx,
char *str, char *help)
{
if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
goto out_free;
switch (err->num_errors) {
case 0:
err->idx = idx;
err->str = str;
err->help = help;
break;
case 1:
err->first_idx = err->idx;
err->idx = idx;
err->first_str = err->str;
err->str = str;
err->first_help = err->help;
err->help = help;
break;
default:
pr_debug("Multiple errors dropping message: %s (%s)\n",
err->str, err->help ?: "<no help>");
free(err->str);
err->str = str;
free(err->help);
err->help = help;
break;
}
err->num_errors++;
return;
out_free:
free(str);
free(help);
}
#define MAX_WIDTH 1000
static int get_term_width(void)
{
struct winsize ws;
get_term_dimensions(&ws);
return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}
static void __parse_events_error__print(int err_idx, const char *err_str,
const char *err_help, const char *event)
{
const char *str = "invalid or unsupported event: ";
char _buf[MAX_WIDTH];
char *buf = (char *) event;
int idx = 0;
if (err_str) {
/* -2 for extra '' in the final fprintf */
int width = get_term_width() - 2;
int len_event = strlen(event);
int len_str, max_len, cut = 0;
/*
* Maximum error index indent; we will cut
* the event string if it's bigger.
*/
int max_err_idx = 13;
/*
* Let's be specific with the message when
* we have the precise error.
*/
str = "event syntax error: ";
len_str = strlen(str);
max_len = width - len_str;
buf = _buf;
/* We're cutting from the beginning. */
if (err_idx > max_err_idx)
cut = err_idx - max_err_idx;
strncpy(buf, event + cut, max_len);
/* Mark cut parts with '..' on both sides. */
if (cut)
buf[0] = buf[1] = '.';
if ((len_event - cut) > max_len) {
buf[max_len - 1] = buf[max_len - 2] = '.';
buf[max_len] = 0;
}
idx = len_str + err_idx - cut;
}
fprintf(stderr, "%s'%s'\n", str, buf);
if (idx) {
fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
if (err_help)
fprintf(stderr, "\n%s\n", err_help);
}
}
void parse_events_error__print(struct parse_events_error *err,
const char *event)
{
if (!err->num_errors)
return;
__parse_events_error__print(err->idx, err->str, err->help, event);
if (err->num_errors > 1) {
fputs("\nInitial error:\n", stderr);
__parse_events_error__print(err->first_idx, err->first_str,
err->first_help, event);
}
}
#undef MAX_WIDTH
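/* Option callback: parse the event string into the evlist referenced through @opt's parse_events_option_args. */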
int parse_events_option(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct parse_events_option_args *args = opt->value;
struct parse_events_error err;
int ret;
parse_events_error__init(&err);
ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
/*fake_pmu=*/NULL, /*warn_if_reordered=*/true);
if (ret) {
parse_events_error__print(&err, str);
fprintf(stderr, "Run 'perf list' for a list of valid events\n");
}
parse_events_error__exit(&err);
return ret;
}
int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
struct parse_events_option_args *args = opt->value;
int ret;
if (*args->evlistp == NULL) {
*args->evlistp = evlist__new();
if (*args->evlistp == NULL) {
fprintf(stderr, "Not enough memory to create evlist\n");
return -1;
}
}
ret = parse_events_option(opt, str, unset);
if (ret) {
evlist__delete(*args->evlistp);
*args->evlistp = NULL;
}
return ret;
}
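/*
 * Apply @func to each evsel added by the last event option, walking
 * backwards from the last evsel until the previous cmdline group
 * boundary. @func is called once with NULL when the evlist is empty.
 */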
static int
foreach_evsel_in_last_glob(struct evlist *evlist,
int (*func)(struct evsel *evsel,
const void *arg),
const void *arg)
{
struct evsel *last = NULL;
int err;
/*
* Don't return when the list is empty; give func a chance to report
* an error when it finds last == NULL.
*
* So no need to WARN here, let *func do this.
*/
if (evlist->core.nr_entries > 0)
last = evlist__last(evlist);
do {
err = (*func)(last, arg);
if (err)
return -1;
if (!last)
return 0;
if (last->core.node.prev == &evlist->core.entries)
return 0;
last = list_entry(last->core.node.prev, struct evsel, core.node);
} while (!last->cmdline_group_boundary);
return 0;
}
static int set_filter(struct evsel *evsel, const void *arg)
{
const char *str = arg;
bool found = false;
int nr_addr_filters = 0;
struct perf_pmu *pmu = NULL;
if (evsel == NULL) {
fprintf(stderr,
"--filter option should follow a -e tracepoint or HW tracer option\n");
return -1;
}
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
if (evsel__append_tp_filter(evsel, str) < 0) {
fprintf(stderr,
"not enough memory to hold filter string\n");
return -1;
}
return 0;
}
while ((pmu = perf_pmus__scan(pmu)) != NULL)
if (pmu->type == evsel->core.attr.type) {
found = true;
break;
}
if (found)
perf_pmu__scan_file(pmu, "nr_addr_filters",
"%d", &nr_addr_filters);
if (!nr_addr_filters)
return perf_bpf_filter__parse(&evsel->bpf_filters, str);
if (evsel__append_addr_filter(evsel, str) < 0) {
fprintf(stderr,
"not enough memory to hold filter string\n");
return -1;
}
return 0;
}
int parse_filter(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct evlist *evlist = *(struct evlist **)opt->value;
return foreach_evsel_in_last_glob(evlist, set_filter,
(const void *)str);
}
static int add_exclude_perf_filter(struct evsel *evsel,
const void *arg __maybe_unused)
{
char new_filter[64];
if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
fprintf(stderr,
"--exclude-perf option should follow a -e tracepoint option\n");
return -1;
}
snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
if (evsel__append_tp_filter(evsel, new_filter) < 0) {
fprintf(stderr,
"not enough memory to hold filter string\n");
return -1;
}
return 0;
}
int exclude_perf(const struct option *opt,
const char *arg __maybe_unused,
int unset __maybe_unused)
{
struct evlist *evlist = *(struct evlist **)opt->value;
return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
NULL);
}
int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}
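/*
 * Allocate a term initialized from @temp, storing @str (string terms) or
 * @num (numeric terms) as its value.
 */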
static int new_term(struct parse_events_term **_term,
struct parse_events_term *temp,
char *str, u64 num)
{
struct parse_events_term *term;
term = malloc(sizeof(*term));
if (!term)
return -ENOMEM;
*term = *temp;
INIT_LIST_HEAD(&term->list);
term->weak = false;
switch (term->type_val) {
case PARSE_EVENTS__TERM_TYPE_NUM:
term->val.num = num;
break;
case PARSE_EVENTS__TERM_TYPE_STR:
term->val.str = str;
break;
default:
free(term);
return -EINVAL;
}
*_term = term;
return 0;
}
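/* Build a numeric term; when @config is NULL it defaults to the term type's name. */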
int parse_events_term__num(struct parse_events_term **term,
enum parse_events__term_type type_term,
const char *config, u64 num,
bool no_value,
void *loc_term_, void *loc_val_)
{
YYLTYPE *loc_term = loc_term_;
YYLTYPE *loc_val = loc_val_;
struct parse_events_term temp = {
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = type_term,
.config = config ? : strdup(config_term_name(type_term)),
.no_value = no_value,
.err_term = loc_term ? loc_term->first_column : 0,
.err_val = loc_val ? loc_val->first_column : 0,
};
return new_term(term, &temp, /*str=*/NULL, num);
}
int parse_events_term__str(struct parse_events_term **term,
enum parse_events__term_type type_term,
char *config, char *str,
void *loc_term_, void *loc_val_)
{
YYLTYPE *loc_term = loc_term_;
YYLTYPE *loc_val = loc_val_;
struct parse_events_term temp = {
.type_val = PARSE_EVENTS__TERM_TYPE_STR,
.type_term = type_term,
.config = config,
.err_term = loc_term ? loc_term->first_column : 0,
.err_val = loc_val ? loc_val->first_column : 0,
};
return new_term(term, &temp, str, /*num=*/0);
}
int parse_events_term__term(struct parse_events_term **term,
enum parse_events__term_type term_lhs,
enum parse_events__term_type term_rhs,
void *loc_term, void *loc_val)
{
return parse_events_term__str(term, term_lhs, NULL,
strdup(config_term_name(term_rhs)),
loc_term, loc_val);
}
int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term)
{
char *str;
struct parse_events_term temp = *term;
temp.used = false;
if (term->config) {
temp.config = strdup(term->config);
if (!temp.config)
return -ENOMEM;
}
if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
return new_term(new, &temp, /*str=*/NULL, term->val.num);
str = strdup(term->val.str);
if (!str)
return -ENOMEM;
return new_term(new, &temp, str, /*num=*/0);
}
void parse_events_term__delete(struct parse_events_term *term)
{
if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
zfree(&term->val.str);
zfree(&term->config);
free(term);
}
int parse_events_copy_term_list(struct list_head *old,
struct list_head **new)
{
struct parse_events_term *term, *n;
int ret;
if (!old) {
*new = NULL;
return 0;
}
*new = malloc(sizeof(struct list_head));
if (!*new)
return -ENOMEM;
INIT_LIST_HEAD(*new);
list_for_each_entry (term, old, list) {
ret = parse_events_term__clone(&n, term);
if (ret)
return ret;
list_add_tail(&n->list, *new);
}
return 0;
}
void parse_events_terms__purge(struct list_head *terms)
{
struct parse_events_term *term, *h;
list_for_each_entry_safe(term, h, terms, list) {
list_del_init(&term->list);
parse_events_term__delete(term);
}
}
void parse_events_terms__delete(struct list_head *terms)
{
if (!terms)
return;
parse_events_terms__purge(terms);
free(terms);
}
int parse_events_term__to_strbuf(struct list_head *term_list, struct strbuf *sb)
{
struct parse_events_term *term;
bool first = true;
if (!term_list)
return 0;
list_for_each_entry(term, term_list, list) {
int ret;
if (!first) {
ret = strbuf_addch(sb, ',');
if (ret < 0)
return ret;
}
first = false;
		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->no_value) {
				assert(term->val.num == 1);
				ret = strbuf_addf(sb, "%s", term->config);
			} else
				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
		} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
if (term->config) {
ret = strbuf_addf(sb, "%s=", term->config);
if (ret < 0)
return ret;
} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
ret = strbuf_addf(sb, "%s=", config_term_name(term->type_term));
if (ret < 0)
return ret;
}
assert(!term->no_value);
ret = strbuf_addf(sb, "%s", term->val.str);
}
if (ret < 0)
return ret;
}
return 0;
}
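/*
 * Illustrative sketch, not part of the original file: one way a caller might
 * build a user-supplied numeric term, serialize it with
 * parse_events_term__to_strbuf() and release it again.  The strbuf helpers and
 * LIST_HEAD() are assumed to behave as in tools/perf/util/strbuf.h and
 * tools/include/linux/list.h; guarded out so it cannot affect the build.
 */
#if 0
static int example_serialize_period_term(struct strbuf *sb)
{
	struct parse_events_term *term;
	LIST_HEAD(terms);
	int ret;

	ret = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
				     strdup("period"), 1000, /*no_value=*/false,
				     /*loc_term_=*/NULL, /*loc_val_=*/NULL);
	if (ret)
		return ret;
	list_add_tail(&term->list, &terms);

	ret = parse_events_term__to_strbuf(&terms, sb);	/* e.g. "period=0x3e8" */
	parse_events_terms__purge(&terms);
	return ret;
}
#endif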
void parse_events_evlist_error(struct parse_events_state *parse_state,
int idx, const char *str)
{
if (!parse_state->error)
return;
parse_events_error__handle(parse_state->error, idx, strdup(str), NULL);
}
static void config_terms_list(char *buf, size_t buf_sz)
{
int i;
bool first = true;
buf[0] = '\0';
for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
const char *name = config_term_name(i);
if (!config_term_avail(i, NULL))
continue;
if (!name)
continue;
if (name[0] == '<')
continue;
if (strlen(buf) + strlen(name) + 2 >= buf_sz)
return;
if (!first)
strcat(buf, ",");
else
first = false;
strcat(buf, name);
}
}
/*
 * Return a string containing the valid config terms of an event.
* @additional_terms: For terms such as PMU sysfs terms.
*/
char *parse_events_formats_error_string(char *additional_terms)
{
char *str;
/* "no-overwrite" is the longest name */
char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
(sizeof("no-overwrite") - 1)];
config_terms_list(static_terms, sizeof(static_terms));
/* valid terms */
if (additional_terms) {
if (asprintf(&str, "valid terms: %s,%s",
additional_terms, static_terms) < 0)
goto fail;
} else {
if (asprintf(&str, "valid terms: %s", static_terms) < 0)
goto fail;
}
return str;
fail:
return NULL;
}
| linux-master | tools/perf/util/parse-events.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dwarf-regs.c : Mapping of DWARF debug register numbers into register names.
*
* Written by: Masami Hiramatsu <[email protected]>
*/
#include <debug.h>
#include <dwarf-regs.h>
#include <elf.h>
#include <linux/kernel.h>
#ifndef EM_AARCH64
#define EM_AARCH64 183 /* ARM 64 bit */
#endif
#ifndef EM_LOONGARCH
#define EM_LOONGARCH 258 /* LoongArch */
#endif
/* Define const char * {arch}_register_tbl[] */
#define DEFINE_DWARF_REGSTR_TABLE
#include "../arch/x86/include/dwarf-regs-table.h"
#include "../arch/arm/include/dwarf-regs-table.h"
#include "../arch/arm64/include/dwarf-regs-table.h"
#include "../arch/sh/include/dwarf-regs-table.h"
#include "../arch/powerpc/include/dwarf-regs-table.h"
#include "../arch/s390/include/dwarf-regs-table.h"
#include "../arch/sparc/include/dwarf-regs-table.h"
#include "../arch/xtensa/include/dwarf-regs-table.h"
#include "../arch/mips/include/dwarf-regs-table.h"
#include "../arch/loongarch/include/dwarf-regs-table.h"
#define __get_dwarf_regstr(tbl, n) (((n) < ARRAY_SIZE(tbl)) ? (tbl)[(n)] : NULL)
/* Return architecture dependent register string (for kprobe-tracer) */
const char *get_dwarf_regstr(unsigned int n, unsigned int machine)
{
switch (machine) {
case EM_NONE: /* Generic arch - use host arch */
return get_arch_regstr(n);
case EM_386:
return __get_dwarf_regstr(x86_32_regstr_tbl, n);
case EM_X86_64:
return __get_dwarf_regstr(x86_64_regstr_tbl, n);
case EM_ARM:
return __get_dwarf_regstr(arm_regstr_tbl, n);
case EM_AARCH64:
return __get_dwarf_regstr(aarch64_regstr_tbl, n);
case EM_SH:
return __get_dwarf_regstr(sh_regstr_tbl, n);
case EM_S390:
return __get_dwarf_regstr(s390_regstr_tbl, n);
case EM_PPC:
case EM_PPC64:
return __get_dwarf_regstr(powerpc_regstr_tbl, n);
case EM_SPARC:
case EM_SPARCV9:
return __get_dwarf_regstr(sparc_regstr_tbl, n);
case EM_XTENSA:
return __get_dwarf_regstr(xtensa_regstr_tbl, n);
case EM_MIPS:
return __get_dwarf_regstr(mips_regstr_tbl, n);
case EM_LOONGARCH:
return __get_dwarf_regstr(loongarch_regstr_tbl, n);
default:
pr_err("ELF MACHINE %x is not supported.\n", machine);
}
return NULL;
}
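/*
 * Illustrative sketch, not part of the original file: resolving a DWARF
 * register number for a given ELF machine.  The name returned for x86-64
 * register 0 is whatever x86_64_regstr_tbl[0] holds; treat the exact string
 * as an assumption.  Guarded out so it cannot affect the build.
 */
#if 0
static void example_resolve_dwarf_reg(void)
{
	const char *name = get_dwarf_regstr(0, EM_X86_64);

	if (name)
		pr_debug("DWARF register 0 on x86-64 maps to %s\n", name);
}
#endif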
| linux-master | tools/perf/util/dwarf-regs.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <lzma.h>
#include <stdio.h>
#include <linux/compiler.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "compress.h"
#include "debug.h"
#include <string.h>
#include <unistd.h>
#include <internal/lib.h>
#define BUFSIZE 8192
static const char *lzma_strerror(lzma_ret ret)
{
switch ((int) ret) {
case LZMA_MEM_ERROR:
return "Memory allocation failed";
case LZMA_OPTIONS_ERROR:
return "Unsupported decompressor flags";
case LZMA_FORMAT_ERROR:
return "The input is not in the .xz format";
case LZMA_DATA_ERROR:
return "Compressed file is corrupt";
case LZMA_BUF_ERROR:
return "Compressed file is truncated or otherwise corrupt";
default:
return "Unknown error, possibly a bug";
}
}
int lzma_decompress_to_file(const char *input, int output_fd)
{
lzma_action action = LZMA_RUN;
lzma_stream strm = LZMA_STREAM_INIT;
lzma_ret ret;
int err = -1;
u8 buf_in[BUFSIZE];
u8 buf_out[BUFSIZE];
FILE *infile;
infile = fopen(input, "rb");
if (!infile) {
pr_debug("lzma: fopen failed on %s: '%s'\n", input, strerror(errno));
return -1;
}
ret = lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED);
if (ret != LZMA_OK) {
pr_debug("lzma: lzma_stream_decoder failed %s (%d)\n", lzma_strerror(ret), ret);
goto err_fclose;
}
strm.next_in = NULL;
strm.avail_in = 0;
strm.next_out = buf_out;
strm.avail_out = sizeof(buf_out);
while (1) {
if (strm.avail_in == 0 && !feof(infile)) {
strm.next_in = buf_in;
strm.avail_in = fread(buf_in, 1, sizeof(buf_in), infile);
if (ferror(infile)) {
pr_debug("lzma: read error: %s\n", strerror(errno));
goto err_lzma_end;
}
if (feof(infile))
action = LZMA_FINISH;
}
ret = lzma_code(&strm, action);
if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
ssize_t write_size = sizeof(buf_out) - strm.avail_out;
if (writen(output_fd, buf_out, write_size) != write_size) {
pr_debug("lzma: write error: %s\n", strerror(errno));
goto err_lzma_end;
}
strm.next_out = buf_out;
strm.avail_out = sizeof(buf_out);
}
if (ret != LZMA_OK) {
if (ret == LZMA_STREAM_END)
break;
pr_debug("lzma: failed %s\n", lzma_strerror(ret));
goto err_lzma_end;
}
}
err = 0;
err_lzma_end:
lzma_end(&strm);
err_fclose:
fclose(infile);
return err;
}
bool lzma_is_compressed(const char *input)
{
int fd = open(input, O_RDONLY);
const uint8_t magic[6] = { 0xFD, '7', 'z', 'X', 'Z', 0x00 };
char buf[6] = { 0 };
ssize_t rc;
	if (fd < 0)
		return false;
rc = read(fd, buf, sizeof(buf));
close(fd);
return rc == sizeof(buf) ?
memcmp(buf, magic, sizeof(buf)) == 0 : false;
}
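/*
 * Illustrative sketch, not part of the original file: checking for the xz
 * magic before decompressing into an already-open temporary file descriptor.
 * The caller is assumed to own both the path and the output fd.  Guarded out
 * so it cannot affect the build.
 */
#if 0
static int example_maybe_decompress(const char *path, int tmp_fd)
{
	if (!lzma_is_compressed(path))
		return -1;	/* not an .xz payload, let the caller fall back */

	return lzma_decompress_to_file(path, tmp_fd);
}
#endif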
| linux-master | tools/perf/util/lzma.c |
// SPDX-License-Identifier: GPL-2.0
#include "cacheline.h"
#include <unistd.h>
#ifdef _SC_LEVEL1_DCACHE_LINESIZE
#define cache_line_size(cacheline_sizep) *cacheline_sizep = sysconf(_SC_LEVEL1_DCACHE_LINESIZE)
#else
#include <api/fs/fs.h>
#include "debug.h"
static void cache_line_size(int *cacheline_sizep)
{
if (sysfs__read_int("devices/system/cpu/cpu0/cache/index0/coherency_line_size", cacheline_sizep))
pr_debug("cannot determine cache line size");
}
#endif
int cacheline_size(void)
{
static int size;
if (!size)
cache_line_size(&size);
return size;
}
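/*
 * Illustrative sketch, not part of the original file: rounding an address
 * down to the start of its cache line, the typical use of cacheline_size()
 * (the cl_address()/cl_offset() helpers in cacheline.h are assumed to do the
 * equivalent).  Guarded out so it cannot affect the build.
 */
#if 0
static u64 example_cacheline_start(u64 addr)
{
	u64 size = (u64)cacheline_size();

	return addr & ~(size - 1);
}
#endif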
| linux-master | tools/perf/util/cacheline.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
#include "evswitch.h"
#include "evlist.h"
bool evswitch__discard(struct evswitch *evswitch, struct evsel *evsel)
{
if (evswitch->on && evswitch->discarding) {
if (evswitch->on != evsel)
return true;
evswitch->discarding = false;
if (!evswitch->show_on_off_events)
return true;
return false;
}
if (evswitch->off && !evswitch->discarding) {
if (evswitch->off != evsel)
return false;
evswitch->discarding = true;
if (!evswitch->show_on_off_events)
return true;
}
return false;
}
static int evswitch__fprintf_enoent(FILE *fp, const char *evtype, const char *evname)
{
int printed = fprintf(fp, "ERROR: switch-%s event not found (%s)\n", evtype, evname);
return printed += fprintf(fp, "HINT: use 'perf evlist' to see the available event names\n");
}
int evswitch__init(struct evswitch *evswitch, struct evlist *evlist, FILE *fp)
{
if (evswitch->on_name) {
evswitch->on = evlist__find_evsel_by_str(evlist, evswitch->on_name);
if (evswitch->on == NULL) {
evswitch__fprintf_enoent(fp, "on", evswitch->on_name);
return -ENOENT;
}
evswitch->discarding = true;
}
if (evswitch->off_name) {
evswitch->off = evlist__find_evsel_by_str(evlist, evswitch->off_name);
if (evswitch->off == NULL) {
evswitch__fprintf_enoent(fp, "off", evswitch->off_name);
return -ENOENT;
}
}
return 0;
}
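/*
 * Illustrative sketch, not part of the original file: the usual calling
 * pattern for the two functions above - initialize the switch from an evlist,
 * then ask evswitch__discard() about every sample's evsel and skip the sample
 * while discarding is active.  The process_sample() consumer is hypothetical.
 * Guarded out so it cannot affect the build.
 */
#if 0
static int example_filter_sample(struct evswitch *evswitch, struct evsel *evsel)
{
	if (evswitch__discard(evswitch, evsel))
		return 0;	/* outside the on/off window, drop the sample */

	return process_sample(evsel);	/* hypothetical consumer */
}
#endif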
| linux-master | tools/perf/util/evswitch.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <api/fs/fs.h>
#include "trace-event.h"
#include "machine.h"
/*
* global trace_event object used by trace_event__tp_format
*
* TODO There's no cleanup call for this. Add some sort of
* __exit function support and call trace_event__cleanup
* there.
*/
static struct trace_event tevent;
static bool tevent_initialized;
int trace_event__init(struct trace_event *t)
{
struct tep_handle *pevent = tep_alloc();
if (pevent) {
t->plugin_list = tep_load_plugins(pevent);
t->pevent = pevent;
}
return pevent ? 0 : -1;
}
static int trace_event__init2(void)
{
int be = tep_is_bigendian();
struct tep_handle *pevent;
if (trace_event__init(&tevent))
return -1;
pevent = tevent.pevent;
tep_set_flag(pevent, TEP_NSEC_OUTPUT);
tep_set_file_bigendian(pevent, be);
tep_set_local_bigendian(pevent, be);
tevent_initialized = true;
return 0;
}
int trace_event__register_resolver(struct machine *machine,
tep_func_resolver_t *func)
{
if (!tevent_initialized && trace_event__init2())
return -1;
return tep_set_function_resolver(tevent.pevent, func, machine);
}
void trace_event__cleanup(struct trace_event *t)
{
tep_unload_plugins(t->plugin_list, t->pevent);
tep_free(t->pevent);
}
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
static struct tep_event*
tp_format(const char *sys, const char *name)
{
char *tp_dir = get_events_file(sys);
struct tep_handle *pevent = tevent.pevent;
struct tep_event *event = NULL;
char path[PATH_MAX];
size_t size;
char *data;
int err;
if (!tp_dir)
return ERR_PTR(-errno);
scnprintf(path, PATH_MAX, "%s/%s/format", tp_dir, name);
put_events_file(tp_dir);
err = filename__read_str(path, &data, &size);
if (err)
return ERR_PTR(err);
tep_parse_format(pevent, &event, data, size, sys);
free(data);
return event;
}
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
struct tep_event*
trace_event__tp_format(const char *sys, const char *name)
{
if (!tevent_initialized && trace_event__init2())
return ERR_PTR(-ENOMEM);
return tp_format(sys, name);
}
struct tep_event *trace_event__tp_format_id(int id)
{
if (!tevent_initialized && trace_event__init2())
return ERR_PTR(-ENOMEM);
return tep_find_event(tevent.pevent, id);
}
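/*
 * Illustrative sketch, not part of the original file: callers are expected to
 * check the <linux/err.h> encoded pointer mentioned in the comments above
 * before dereferencing the returned event.  The "sched"/"sched_switch" names
 * are only examples.  Guarded out so it cannot affect the build.
 */
#if 0
static int example_lookup_sched_switch(void)
{
	struct tep_event *event = trace_event__tp_format("sched", "sched_switch");

	if (IS_ERR(event))
		return PTR_ERR(event);

	return event->id;
}
#endif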
| linux-master | tools/perf/util/trace-event.c |
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <perf/cpumap.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <linux/perf_event.h>
#include <linux/zalloc.h>
#include "cpumap.h"
#include "dso.h"
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string2.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "time-utils.h"
#include <linux/ctype.h>
#include "map.h"
#include "util/namespaces.h"
#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"
#include "session.h"
#include "bpf-event.h"
#include "print_binary.h"
#include "tool.h"
#include "util.h"
static const char *perf_event__names[] = {
[0] = "TOTAL",
[PERF_RECORD_MMAP] = "MMAP",
[PERF_RECORD_MMAP2] = "MMAP2",
[PERF_RECORD_LOST] = "LOST",
[PERF_RECORD_COMM] = "COMM",
[PERF_RECORD_EXIT] = "EXIT",
[PERF_RECORD_THROTTLE] = "THROTTLE",
[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
[PERF_RECORD_FORK] = "FORK",
[PERF_RECORD_READ] = "READ",
[PERF_RECORD_SAMPLE] = "SAMPLE",
[PERF_RECORD_AUX] = "AUX",
[PERF_RECORD_ITRACE_START] = "ITRACE_START",
[PERF_RECORD_LOST_SAMPLES] = "LOST_SAMPLES",
[PERF_RECORD_SWITCH] = "SWITCH",
[PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
[PERF_RECORD_NAMESPACES] = "NAMESPACES",
[PERF_RECORD_KSYMBOL] = "KSYMBOL",
[PERF_RECORD_BPF_EVENT] = "BPF_EVENT",
[PERF_RECORD_CGROUP] = "CGROUP",
[PERF_RECORD_TEXT_POKE] = "TEXT_POKE",
[PERF_RECORD_AUX_OUTPUT_HW_ID] = "AUX_OUTPUT_HW_ID",
[PERF_RECORD_HEADER_ATTR] = "ATTR",
[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
[PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
[PERF_RECORD_ID_INDEX] = "ID_INDEX",
[PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO",
[PERF_RECORD_AUXTRACE] = "AUXTRACE",
[PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR",
[PERF_RECORD_THREAD_MAP] = "THREAD_MAP",
[PERF_RECORD_CPU_MAP] = "CPU_MAP",
[PERF_RECORD_STAT_CONFIG] = "STAT_CONFIG",
[PERF_RECORD_STAT] = "STAT",
[PERF_RECORD_STAT_ROUND] = "STAT_ROUND",
[PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE",
[PERF_RECORD_TIME_CONV] = "TIME_CONV",
[PERF_RECORD_HEADER_FEATURE] = "FEATURE",
[PERF_RECORD_COMPRESSED] = "COMPRESSED",
[PERF_RECORD_FINISHED_INIT] = "FINISHED_INIT",
};
const char *perf_event__name(unsigned int id)
{
if (id >= ARRAY_SIZE(perf_event__names))
return "INVALID";
if (!perf_event__names[id])
return "UNKNOWN";
return perf_event__names[id];
}
struct process_symbol_args {
const char *name;
u64 start;
};
static int find_func_symbol_cb(void *arg, const char *name, char type,
u64 start)
{
struct process_symbol_args *args = arg;
/*
* Must be a function or at least an alias, as in PARISC64, where "_text" is
* an 'A' to the same address as "_stext".
*/
if (!(kallsyms__is_function(type) ||
type == 'A') || strcmp(name, args->name))
return 0;
args->start = start;
return 1;
}
static int find_any_symbol_cb(void *arg, const char *name,
char type __maybe_unused, u64 start)
{
struct process_symbol_args *args = arg;
if (strcmp(name, args->name))
return 0;
args->start = start;
return 1;
}
int kallsyms__get_function_start(const char *kallsyms_filename,
const char *symbol_name, u64 *addr)
{
struct process_symbol_args args = { .name = symbol_name, };
if (kallsyms__parse(kallsyms_filename, &args, find_func_symbol_cb) <= 0)
return -1;
*addr = args.start;
return 0;
}
int kallsyms__get_symbol_start(const char *kallsyms_filename,
const char *symbol_name, u64 *addr)
{
struct process_symbol_args args = { .name = symbol_name, };
if (kallsyms__parse(kallsyms_filename, &args, find_any_symbol_cb) <= 0)
return -1;
*addr = args.start;
return 0;
}
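/*
 * Illustrative sketch, not part of the original file: resolving the kernel
 * "_text" symbol from /proc/kallsyms with the helper above; the path and the
 * symbol name are only examples.  Guarded out so it cannot affect the build.
 */
#if 0
static int example_kernel_text_start(u64 *addr)
{
	return kallsyms__get_function_start("/proc/kallsyms", "_text", addr);
}
#endif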
void perf_event__read_stat_config(struct perf_stat_config *config,
struct perf_record_stat_config *event)
{
unsigned i;
for (i = 0; i < event->nr; i++) {
switch (event->data[i].tag) {
#define CASE(__term, __val) \
case PERF_STAT_CONFIG_TERM__##__term: \
config->__val = event->data[i].val; \
break;
CASE(AGGR_MODE, aggr_mode)
CASE(SCALE, scale)
CASE(INTERVAL, interval)
CASE(AGGR_LEVEL, aggr_level)
#undef CASE
default:
pr_warning("unknown stat config term %" PRI_lu64 "\n",
event->data[i].tag);
}
}
}
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
const char *s;
if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
s = " exec";
else
s = "";
return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
size_t ret = 0;
struct perf_ns_link_info *ns_link_info;
u32 nr_namespaces, idx;
ns_link_info = event->namespaces.link_info;
nr_namespaces = event->namespaces.nr_namespaces;
ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
event->namespaces.pid,
event->namespaces.tid,
nr_namespaces);
for (idx = 0; idx < nr_namespaces; idx++) {
if (idx && (idx % 4 == 0))
ret += fprintf(fp, "\n\t\t ");
ret += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
perf_ns__name(idx), (u64)ns_link_info[idx].dev,
(u64)ns_link_info[idx].ino,
((idx + 1) != nr_namespaces) ? ", " : "]\n");
}
return ret;
}
size_t perf_event__fprintf_cgroup(union perf_event *event, FILE *fp)
{
return fprintf(fp, " cgroup: %" PRI_lu64 " %s\n",
event->cgroup.id, event->cgroup.path);
}
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_comm_event(machine, event, sample);
}
int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_namespaces_event(machine, event, sample);
}
int perf_event__process_cgroup(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_cgroup_event(machine, event, sample);
}
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_lost_event(machine, event, sample);
}
int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
return machine__process_aux_event(machine, event);
}
int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
return machine__process_itrace_start_event(machine, event);
}
int perf_event__process_aux_output_hw_id(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
return machine__process_aux_output_hw_id_event(machine, event);
}
int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_lost_samples_event(machine, event, sample);
}
int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
return machine__process_switch_event(machine, event);
}
int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
return machine__process_ksymbol(machine, event, sample);
}
int perf_event__process_bpf(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_bpf(machine, event, sample);
}
int perf_event__process_text_poke(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_text_poke(machine, event, sample);
}
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64 "]: %c %s\n",
event->mmap.pid, event->mmap.tid, event->mmap.start,
event->mmap.len, event->mmap.pgoff,
(event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
event->mmap.filename);
}
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
char sbuild_id[SBUILD_ID_SIZE];
struct build_id bid;
build_id__init(&bid, event->mmap2.build_id,
event->mmap2.build_id_size);
build_id__sprintf(&bid, sbuild_id);
return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
" <%s>]: %c%c%c%c %s\n",
event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
event->mmap2.len, event->mmap2.pgoff, sbuild_id,
(event->mmap2.prot & PROT_READ) ? 'r' : '-',
(event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
(event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
(event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
event->mmap2.filename);
} else {
return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
" %02x:%02x %"PRI_lu64" %"PRI_lu64"]: %c%c%c%c %s\n",
event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
event->mmap2.min, event->mmap2.ino,
event->mmap2.ino_generation,
(event->mmap2.prot & PROT_READ) ? 'r' : '-',
(event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
(event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
(event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
event->mmap2.filename);
}
}
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
struct perf_thread_map *threads = thread_map__new_event(&event->thread_map);
size_t ret;
ret = fprintf(fp, " nr: ");
if (threads)
ret += thread_map__fprintf(threads, fp);
else
ret += fprintf(fp, "failed to get threads from event\n");
perf_thread_map__put(threads);
return ret;
}
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
struct perf_cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
size_t ret;
ret = fprintf(fp, ": ");
if (cpus)
ret += cpu_map__fprintf(cpus, fp);
else
ret += fprintf(fp, "failed to get cpumap from event\n");
perf_cpu_map__put(cpus);
return ret;
}
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_mmap_event(machine, event, sample);
}
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_mmap2_event(machine, event, sample);
}
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
return fprintf(fp, "(%d:%d):(%d:%d)\n",
event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
}
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_fork_event(machine, event, sample);
}
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_exit_event(machine, event, sample);
}
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
return fprintf(fp, " offset: %#"PRI_lx64" size: %#"PRI_lx64" flags: %#"PRI_lx64" [%s%s%s]\n",
event->aux.aux_offset, event->aux.aux_size,
event->aux.flags,
event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
event->aux.flags & PERF_AUX_FLAG_PARTIAL ? "P" : "");
}
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
return fprintf(fp, " pid: %u tid: %u\n",
event->itrace_start.pid, event->itrace_start.tid);
}
size_t perf_event__fprintf_aux_output_hw_id(union perf_event *event, FILE *fp)
{
return fprintf(fp, " hw_id: %#"PRI_lx64"\n",
event->aux_output_hw_id.hw_id);
}
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
const char *in_out = !out ? "IN " :
!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
"OUT " : "OUT preempt";
if (event->header.type == PERF_RECORD_SWITCH)
return fprintf(fp, " %s\n", in_out);
return fprintf(fp, " %s %s pid/tid: %5d/%-5d\n",
in_out, out ? "next" : "prev",
event->context_switch.next_prev_pid,
event->context_switch.next_prev_tid);
}
static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
{
return fprintf(fp, " lost %" PRI_lu64 "\n", event->lost.lost);
}
size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
{
return fprintf(fp, " addr %" PRI_lx64 " len %u type %u flags 0x%x name %s\n",
event->ksymbol.addr, event->ksymbol.len,
event->ksymbol.ksym_type,
event->ksymbol.flags, event->ksymbol.name);
}
size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp)
{
return fprintf(fp, " type %u, flags %u, id %u\n",
event->bpf.type, event->bpf.flags, event->bpf.id);
}
static int text_poke_printer(enum binary_printer_ops op, unsigned int val,
void *extra, FILE *fp)
{
bool old = *(bool *)extra;
switch ((int)op) {
case BINARY_PRINT_LINE_BEGIN:
return fprintf(fp, " %s bytes:", old ? "Old" : "New");
case BINARY_PRINT_NUM_DATA:
return fprintf(fp, " %02x", val);
case BINARY_PRINT_LINE_END:
return fprintf(fp, "\n");
default:
return 0;
}
}
size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *machine, FILE *fp)
{
struct perf_record_text_poke_event *tp = &event->text_poke;
size_t ret;
bool old;
ret = fprintf(fp, " %" PRI_lx64 " ", tp->addr);
if (machine) {
struct addr_location al;
addr_location__init(&al);
al.map = map__get(maps__find(machine__kernel_maps(machine), tp->addr));
if (al.map && map__load(al.map) >= 0) {
al.addr = map__map_ip(al.map, tp->addr);
al.sym = map__find_symbol(al.map, al.addr);
if (al.sym)
ret += symbol__fprintf_symname_offs(al.sym, &al, fp);
}
addr_location__exit(&al);
}
ret += fprintf(fp, " old len %u new len %u\n", tp->old_len, tp->new_len);
old = true;
ret += binary__fprintf(tp->bytes, tp->old_len, 16, text_poke_printer,
&old, fp);
old = false;
ret += binary__fprintf(tp->bytes + tp->old_len, tp->new_len, 16,
text_poke_printer, &old, fp);
return ret;
}
size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FILE *fp)
{
size_t ret = fprintf(fp, "PERF_RECORD_%s",
perf_event__name(event->header.type));
switch (event->header.type) {
case PERF_RECORD_COMM:
ret += perf_event__fprintf_comm(event, fp);
break;
case PERF_RECORD_FORK:
case PERF_RECORD_EXIT:
ret += perf_event__fprintf_task(event, fp);
break;
case PERF_RECORD_MMAP:
ret += perf_event__fprintf_mmap(event, fp);
break;
case PERF_RECORD_NAMESPACES:
ret += perf_event__fprintf_namespaces(event, fp);
break;
case PERF_RECORD_CGROUP:
ret += perf_event__fprintf_cgroup(event, fp);
break;
case PERF_RECORD_MMAP2:
ret += perf_event__fprintf_mmap2(event, fp);
break;
case PERF_RECORD_AUX:
ret += perf_event__fprintf_aux(event, fp);
break;
case PERF_RECORD_ITRACE_START:
ret += perf_event__fprintf_itrace_start(event, fp);
break;
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
ret += perf_event__fprintf_switch(event, fp);
break;
case PERF_RECORD_LOST:
ret += perf_event__fprintf_lost(event, fp);
break;
case PERF_RECORD_KSYMBOL:
ret += perf_event__fprintf_ksymbol(event, fp);
break;
case PERF_RECORD_BPF_EVENT:
ret += perf_event__fprintf_bpf(event, fp);
break;
case PERF_RECORD_TEXT_POKE:
ret += perf_event__fprintf_text_poke(event, machine, fp);
break;
case PERF_RECORD_AUX_OUTPUT_HW_ID:
ret += perf_event__fprintf_aux_output_hw_id(event, fp);
break;
default:
ret += fprintf(fp, "\n");
}
return ret;
}
int perf_event__process(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_event(machine, event, sample);
}
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
struct maps *maps = thread__maps(thread);
struct machine *machine = maps__machine(maps);
bool load_map = false;
maps__zput(al->maps);
map__zput(al->map);
thread__zput(al->thread);
al->thread = thread__get(thread);
al->addr = addr;
al->cpumode = cpumode;
al->filtered = 0;
if (machine == NULL)
return NULL;
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
maps = machine__kernel_maps(machine);
load_map = true;
} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
al->level = 'g';
maps = machine__kernel_maps(machine);
load_map = true;
} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
al->level = 'u';
} else {
al->level = 'H';
if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
!perf_guest)
al->filtered |= (1 << HIST_FILTER__GUEST);
if ((cpumode == PERF_RECORD_MISC_USER ||
cpumode == PERF_RECORD_MISC_KERNEL) &&
!perf_host)
al->filtered |= (1 << HIST_FILTER__HOST);
return NULL;
}
al->maps = maps__get(maps);
al->map = map__get(maps__find(maps, al->addr));
if (al->map != NULL) {
/*
* Kernel maps might be changed when loading symbols so loading
* must be done prior to using kernel maps.
*/
if (load_map)
map__load(al->map);
al->addr = map__map_ip(al->map, al->addr);
}
return al->map;
}
/*
* For branch stacks or branch samples, the sample cpumode might not be correct
 * because it applies only to the sample 'ip' and not necessarily to 'addr' or
* branch stack addresses. If possible, use a fallback to deal with those cases.
*/
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
struct map *map = thread__find_map(thread, cpumode, addr, al);
struct machine *machine = maps__machine(thread__maps(thread));
u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
if (map || addr_cpumode == cpumode)
return map;
return thread__find_map(thread, addr_cpumode, addr, al);
}
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
u64 addr, struct addr_location *al)
{
al->sym = NULL;
if (thread__find_map(thread, cpumode, addr, al))
al->sym = map__find_symbol(al->map, al->addr);
return al->sym;
}
struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
u64 addr, struct addr_location *al)
{
al->sym = NULL;
if (thread__find_map_fb(thread, cpumode, addr, al))
al->sym = map__find_symbol(al->map, al->addr);
return al->sym;
}
static bool check_address_range(struct intlist *addr_list, int addr_range,
unsigned long addr)
{
struct int_node *pos;
intlist__for_each_entry(pos, addr_list) {
if (addr >= pos->i && addr < pos->i + addr_range)
return true;
}
return false;
}
/*
* Callers need to drop the reference to al->thread, obtained in
* machine__findnew_thread()
*/
int machine__resolve(struct machine *machine, struct addr_location *al,
struct perf_sample *sample)
{
struct thread *thread;
struct dso *dso;
if (symbol_conf.guest_code && !machine__is_host(machine))
thread = machine__findnew_guest_code(machine, sample->pid);
else
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL)
return -1;
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread__tid(thread));
thread__find_map(thread, sample->cpumode, sample->ip, al);
dso = al->map ? map__dso(al->map) : NULL;
dump_printf(" ...... dso: %s\n",
dso
? dso->long_name
: (al->level == 'H' ? "[hypervisor]" : "<not found>"));
if (thread__is_filtered(thread))
al->filtered |= (1 << HIST_FILTER__THREAD);
thread__put(thread);
thread = NULL;
al->sym = NULL;
al->cpu = sample->cpu;
al->socket = -1;
al->srcline = NULL;
if (al->cpu >= 0) {
struct perf_env *env = machine->env;
if (env && env->cpu)
al->socket = env->cpu[al->cpu].socket_id;
}
if (al->map) {
if (symbol_conf.dso_list &&
(!dso || !(strlist__has_entry(symbol_conf.dso_list,
dso->short_name) ||
(dso->short_name != dso->long_name &&
strlist__has_entry(symbol_conf.dso_list,
dso->long_name))))) {
al->filtered |= (1 << HIST_FILTER__DSO);
}
al->sym = map__find_symbol(al->map, al->addr);
} else if (symbol_conf.dso_list) {
al->filtered |= (1 << HIST_FILTER__DSO);
}
if (symbol_conf.sym_list) {
int ret = 0;
char al_addr_str[32];
size_t sz = sizeof(al_addr_str);
if (al->sym) {
ret = strlist__has_entry(symbol_conf.sym_list,
al->sym->name);
}
if (!ret && al->sym) {
snprintf(al_addr_str, sz, "0x%"PRIx64,
map__unmap_ip(al->map, al->sym->start));
ret = strlist__has_entry(symbol_conf.sym_list,
al_addr_str);
}
if (!ret && symbol_conf.addr_list && al->map) {
unsigned long addr = map__unmap_ip(al->map, al->addr);
ret = intlist__has_entry(symbol_conf.addr_list, addr);
if (!ret && symbol_conf.addr_range) {
ret = check_address_range(symbol_conf.addr_list,
symbol_conf.addr_range,
addr);
}
}
if (!ret)
al->filtered |= (1 << HIST_FILTER__SYMBOL);
}
return 0;
}
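/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * addr_location__init()/__exit() around machine__resolve(), which also takes
 * care of dropping the al->thread reference mentioned in the comment above.
 * Guarded out so it cannot affect the build.
 */
#if 0
static int example_resolve_sample(struct machine *machine,
				  struct perf_sample *sample)
{
	struct addr_location al;
	int err;

	addr_location__init(&al);
	err = machine__resolve(machine, &al, sample);
	if (!err && al.sym)
		pr_debug("sample hit %s\n", al.sym->name);
	addr_location__exit(&al);	/* releases maps, map and thread references */
	return err;
}
#endif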
bool is_bts_event(struct perf_event_attr *attr)
{
return attr->type == PERF_TYPE_HARDWARE &&
(attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
attr->sample_period == 1;
}
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
if (attr->type == PERF_TYPE_SOFTWARE &&
(attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
return true;
if (is_bts_event(attr))
return true;
return false;
}
void thread__resolve(struct thread *thread, struct addr_location *al,
struct perf_sample *sample)
{
thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
al->cpu = sample->cpu;
al->sym = NULL;
if (al->map)
al->sym = map__find_symbol(al->map, al->addr);
}
| linux-master | tools/perf/util/event.c |
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "path.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "target.h"
#include "thread.h"
#include "util.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size
#include "cgroup.h"
#include "arm64-frame-pointer-unwind-support.h"
#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>
static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
struct thread *th, bool lock);
static struct dso *machine__kernel_dso(struct machine *machine)
{
return map__dso(machine->vmlinux_map);
}
static void dsos__init(struct dsos *dsos)
{
INIT_LIST_HEAD(&dsos->head);
dsos->root = RB_ROOT;
init_rwsem(&dsos->lock);
}
static void machine__threads_init(struct machine *machine)
{
int i;
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
threads->entries = RB_ROOT_CACHED;
init_rwsem(&threads->lock);
threads->nr = 0;
INIT_LIST_HEAD(&threads->dead);
threads->last_match = NULL;
}
}
static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd)
{
int to_find = (int) *((pid_t *)key);
return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread);
}
static struct thread_rb_node *thread_rb_node__find(const struct thread *th,
struct rb_root *tree)
{
pid_t to_find = thread__tid(th);
struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid);
return rb_entry(nd, struct thread_rb_node, rb_node);
}
static int machine__set_mmap_name(struct machine *machine)
{
if (machine__is_host(machine))
machine->mmap_name = strdup("[kernel.kallsyms]");
else if (machine__is_default_guest(machine))
machine->mmap_name = strdup("[guest.kernel.kallsyms]");
else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
machine->pid) < 0)
machine->mmap_name = NULL;
return machine->mmap_name ? 0 : -ENOMEM;
}
static void thread__set_guest_comm(struct thread *thread, pid_t pid)
{
char comm[64];
snprintf(comm, sizeof(comm), "[guest/%d]", pid);
thread__set_comm(thread, comm, 0);
}
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
int err = -ENOMEM;
memset(machine, 0, sizeof(*machine));
machine->kmaps = maps__new(machine);
if (machine->kmaps == NULL)
return -ENOMEM;
RB_CLEAR_NODE(&machine->rb_node);
dsos__init(&machine->dsos);
machine__threads_init(machine);
machine->vdso_info = NULL;
machine->env = NULL;
machine->pid = pid;
machine->id_hdr_size = 0;
machine->kptr_restrict_warned = false;
machine->comm_exec = false;
machine->kernel_start = 0;
machine->vmlinux_map = NULL;
machine->root_dir = strdup(root_dir);
if (machine->root_dir == NULL)
goto out;
if (machine__set_mmap_name(machine))
goto out;
if (pid != HOST_KERNEL_ID) {
struct thread *thread = machine__findnew_thread(machine, -1,
pid);
if (thread == NULL)
goto out;
thread__set_guest_comm(thread, pid);
thread__put(thread);
}
machine->current_tid = NULL;
err = 0;
out:
if (err) {
zfree(&machine->kmaps);
zfree(&machine->root_dir);
zfree(&machine->mmap_name);
}
	return err;
}
struct machine *machine__new_host(void)
{
struct machine *machine = malloc(sizeof(*machine));
if (machine != NULL) {
machine__init(machine, "", HOST_KERNEL_ID);
if (machine__create_kernel_maps(machine) < 0)
goto out_delete;
}
return machine;
out_delete:
free(machine);
return NULL;
}
struct machine *machine__new_kallsyms(void)
{
struct machine *machine = machine__new_host();
/*
* FIXME:
* 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
* ask for not using the kcore parsing code, once this one is fixed
* to create a map per module.
*/
if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
machine__delete(machine);
machine = NULL;
}
return machine;
}
static void dsos__purge(struct dsos *dsos)
{
struct dso *pos, *n;
down_write(&dsos->lock);
list_for_each_entry_safe(pos, n, &dsos->head, node) {
RB_CLEAR_NODE(&pos->rb_node);
pos->root = NULL;
list_del_init(&pos->node);
dso__put(pos);
}
up_write(&dsos->lock);
}
static void dsos__exit(struct dsos *dsos)
{
dsos__purge(dsos);
exit_rwsem(&dsos->lock);
}
void machine__delete_threads(struct machine *machine)
{
struct rb_node *nd;
int i;
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
down_write(&threads->lock);
nd = rb_first_cached(&threads->entries);
while (nd) {
struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
nd = rb_next(nd);
__machine__remove_thread(machine, trb, trb->thread, false);
}
up_write(&threads->lock);
}
}
void machine__exit(struct machine *machine)
{
int i;
if (machine == NULL)
return;
machine__destroy_kernel_maps(machine);
maps__zput(machine->kmaps);
dsos__exit(&machine->dsos);
machine__exit_vdso(machine);
zfree(&machine->root_dir);
zfree(&machine->mmap_name);
zfree(&machine->current_tid);
zfree(&machine->kallsyms_filename);
machine__delete_threads(machine);
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
exit_rwsem(&threads->lock);
}
}
void machine__delete(struct machine *machine)
{
if (machine) {
machine__exit(machine);
free(machine);
}
}
void machines__init(struct machines *machines)
{
machine__init(&machines->host, "", HOST_KERNEL_ID);
machines->guests = RB_ROOT_CACHED;
}
void machines__exit(struct machines *machines)
{
machine__exit(&machines->host);
/* XXX exit guest */
}
struct machine *machines__add(struct machines *machines, pid_t pid,
const char *root_dir)
{
struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *pos, *machine = malloc(sizeof(*machine));
bool leftmost = true;
if (machine == NULL)
return NULL;
if (machine__init(machine, root_dir, pid) != 0) {
free(machine);
return NULL;
}
while (*p != NULL) {
parent = *p;
pos = rb_entry(parent, struct machine, rb_node);
if (pid < pos->pid)
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&machine->rb_node, parent, p);
rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
machine->machines = machines;
return machine;
}
void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
struct rb_node *nd;
machines->host.comm_exec = comm_exec;
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
machine->comm_exec = comm_exec;
}
}
struct machine *machines__find(struct machines *machines, pid_t pid)
{
struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *machine;
struct machine *default_machine = NULL;
if (pid == HOST_KERNEL_ID)
return &machines->host;
while (*p != NULL) {
parent = *p;
machine = rb_entry(parent, struct machine, rb_node);
if (pid < machine->pid)
p = &(*p)->rb_left;
else if (pid > machine->pid)
p = &(*p)->rb_right;
else
return machine;
if (!machine->pid)
default_machine = machine;
}
return default_machine;
}
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
char path[PATH_MAX];
const char *root_dir = "";
struct machine *machine = machines__find(machines, pid);
if (machine && (machine->pid == pid))
goto out;
if ((pid != HOST_KERNEL_ID) &&
(pid != DEFAULT_GUEST_KERNEL_ID) &&
(symbol_conf.guestmount)) {
sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
if (access(path, R_OK)) {
static struct strlist *seen;
if (!seen)
seen = strlist__new(NULL, NULL);
if (!strlist__has_entry(seen, path)) {
pr_err("Can't access file %s\n", path);
strlist__add(seen, path);
}
machine = NULL;
goto out;
}
root_dir = path;
}
machine = machines__add(machines, pid, root_dir);
out:
return machine;
}
struct machine *machines__find_guest(struct machines *machines, pid_t pid)
{
struct machine *machine = machines__find(machines, pid);
if (!machine)
machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
return machine;
}
/*
* A common case for KVM test programs is that the test program acts as the
* hypervisor, creating, running and destroying the virtual machine, and
* providing the guest object code from its own object code. In this case,
* the VM is not running an OS, but only the functions loaded into it by the
* hypervisor test program, and conveniently, loaded at the same virtual
* addresses.
*
* Normally to resolve addresses, MMAP events are needed to map addresses
* back to the object code and debug symbols for that object code.
*
* Currently, there is no way to get such mapping information from guests
* but, in the scenario described above, the guest has the same mappings
* as the hypervisor, so support for that scenario can be achieved.
*
* To support that, copy the host thread's maps to the guest thread's maps.
* Note, we do not discover the guest until we encounter a guest event,
* which works well because it is not until then that we know that the host
* thread's maps have been set up.
*
* This function returns the guest thread. Apart from keeping the data
* structures sane, using a thread belonging to the guest machine, instead
* of the host thread, allows it to have its own comm (refer
* thread__set_guest_comm()).
*/
static struct thread *findnew_guest_code(struct machine *machine,
struct machine *host_machine,
pid_t pid)
{
struct thread *host_thread;
struct thread *thread;
int err;
if (!machine)
return NULL;
thread = machine__findnew_thread(machine, -1, pid);
if (!thread)
return NULL;
/* Assume maps are set up if there are any */
if (maps__nr_maps(thread__maps(thread)))
return thread;
host_thread = machine__find_thread(host_machine, -1, pid);
if (!host_thread)
goto out_err;
thread__set_guest_comm(thread, pid);
/*
* Guest code can be found in hypervisor process at the same address
* so copy host maps.
*/
err = maps__clone(thread, thread__maps(host_thread));
thread__put(host_thread);
if (err)
goto out_err;
return thread;
out_err:
thread__zput(thread);
return NULL;
}
struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
{
struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
struct machine *machine = machines__findnew(machines, pid);
return findnew_guest_code(machine, host_machine, pid);
}
struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
{
struct machines *machines = machine->machines;
struct machine *host_machine;
if (!machines)
return NULL;
host_machine = machines__find(machines, HOST_KERNEL_ID);
return findnew_guest_code(machine, host_machine, pid);
}
void machines__process_guests(struct machines *machines,
machine__process_t process, void *data)
{
struct rb_node *nd;
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
process(pos, data);
}
}
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
struct rb_node *node;
struct machine *machine;
machines->host.id_hdr_size = id_hdr_size;
for (node = rb_first_cached(&machines->guests); node;
node = rb_next(node)) {
machine = rb_entry(node, struct machine, rb_node);
machine->id_hdr_size = id_hdr_size;
}
return;
}
static void machine__update_thread_pid(struct machine *machine,
struct thread *th, pid_t pid)
{
struct thread *leader;
if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
return;
thread__set_pid(th, pid);
if (thread__pid(th) == thread__tid(th))
return;
leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
if (!leader)
goto out_err;
if (!thread__maps(leader))
thread__set_maps(leader, maps__new(machine));
if (!thread__maps(leader))
goto out_err;
if (thread__maps(th) == thread__maps(leader))
goto out_put;
if (thread__maps(th)) {
/*
* Maps are created from MMAP events which provide the pid and
* tid. Consequently there never should be any maps on a thread
* with an unknown pid. Just print an error if there are.
*/
if (!maps__empty(thread__maps(th)))
pr_err("Discarding thread maps for %d:%d\n",
thread__pid(th), thread__tid(th));
maps__put(thread__maps(th));
}
thread__set_maps(th, maps__get(thread__maps(leader)));
out_put:
thread__put(leader);
return;
out_err:
pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
goto out_put;
}
/*
* Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
* the full rbtree:
*/
static struct thread*
__threads__get_last_match(struct threads *threads, struct machine *machine,
int pid, int tid)
{
struct thread *th;
th = threads->last_match;
if (th != NULL) {
if (thread__tid(th) == tid) {
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
thread__put(threads->last_match);
threads->last_match = NULL;
}
return NULL;
}
static struct thread*
threads__get_last_match(struct threads *threads, struct machine *machine,
int pid, int tid)
{
struct thread *th = NULL;
if (perf_singlethreaded)
th = __threads__get_last_match(threads, machine, pid, tid);
return th;
}
static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
thread__put(threads->last_match);
threads->last_match = thread__get(th);
}
static void
threads__set_last_match(struct threads *threads, struct thread *th)
{
if (perf_singlethreaded)
__threads__set_last_match(threads, th);
}
/*
* Caller must eventually drop thread->refcnt returned with a successful
* lookup/new thread inserted.
*/
static struct thread *____machine__findnew_thread(struct machine *machine,
struct threads *threads,
pid_t pid, pid_t tid,
bool create)
{
struct rb_node **p = &threads->entries.rb_root.rb_node;
struct rb_node *parent = NULL;
struct thread *th;
struct thread_rb_node *nd;
bool leftmost = true;
th = threads__get_last_match(threads, machine, pid, tid);
if (th)
return th;
while (*p != NULL) {
parent = *p;
th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
if (thread__tid(th) == tid) {
threads__set_last_match(threads, th);
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
if (tid < thread__tid(th))
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
if (!create)
return NULL;
th = thread__new(pid, tid);
if (th == NULL)
return NULL;
nd = malloc(sizeof(*nd));
if (nd == NULL) {
thread__put(th);
return NULL;
}
nd->thread = th;
rb_link_node(&nd->rb_node, parent, p);
rb_insert_color_cached(&nd->rb_node, &threads->entries, leftmost);
/*
* We have to initialize maps separately after rb tree is updated.
*
* The reason is that we call machine__findnew_thread within
 * thread__init_maps to find the thread leader and that would screw up
* the rb tree.
*/
if (thread__init_maps(th, machine)) {
pr_err("Thread init failed thread %d\n", pid);
rb_erase_cached(&nd->rb_node, &threads->entries);
RB_CLEAR_NODE(&nd->rb_node);
free(nd);
thread__put(th);
return NULL;
}
/*
* It is now in the rbtree, get a ref
*/
threads__set_last_match(threads, th);
++threads->nr;
return thread__get(th);
}
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
struct threads *threads = machine__threads(machine, tid);
struct thread *th;
down_write(&threads->lock);
th = __machine__findnew_thread(machine, pid, tid);
up_write(&threads->lock);
return th;
}
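/*
 * Illustrative sketch, not part of the original file: per the comment above
 * ____machine__findnew_thread(), every successful lookup returns a reference
 * that the caller must drop with thread__put().  Guarded out so it cannot
 * affect the build.
 */
#if 0
static void example_lookup_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__findnew_thread(machine, pid, tid);

	if (thread) {
		pr_debug("found %s\n", thread__comm_str(thread));
		thread__put(thread);
	}
}
#endif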
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
struct threads *threads = machine__threads(machine, tid);
struct thread *th;
down_read(&threads->lock);
th = ____machine__findnew_thread(machine, threads, pid, tid, false);
up_read(&threads->lock);
return th;
}
/*
* Threads are identified by pid and tid, and the idle task has pid == tid == 0.
* So here a single thread is created for that, but actually there is a separate
* idle task per cpu, so there should be one 'struct thread' per cpu, but there
* is only 1. That causes problems for some tools, requiring workarounds. For
* example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
*/
struct thread *machine__idle_thread(struct machine *machine)
{
struct thread *thread = machine__findnew_thread(machine, 0, 0);
if (!thread || thread__set_comm(thread, "swapper", 0) ||
thread__set_namespaces(thread, 0, NULL))
pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
return thread;
}
struct comm *machine__thread_exec_comm(struct machine *machine,
struct thread *thread)
{
if (machine->comm_exec)
return thread__exec_comm(thread);
else
return thread__comm(thread);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
struct thread *thread = machine__findnew_thread(machine,
event->comm.pid,
event->comm.tid);
bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
int err = 0;
if (exec)
machine->comm_exec = true;
if (dump_trace)
perf_event__fprintf_comm(event, stdout);
if (thread == NULL ||
__thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
err = -1;
}
thread__put(thread);
return err;
}
int machine__process_namespaces_event(struct machine *machine __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
struct thread *thread = machine__findnew_thread(machine,
event->namespaces.pid,
event->namespaces.tid);
int err = 0;
WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
"\nWARNING: kernel seems to support more namespaces than perf"
" tool.\nTry updating the perf tool..\n\n");
WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
"\nWARNING: perf tool seems to support more namespaces than"
" the kernel.\nTry updating the kernel..\n\n");
if (dump_trace)
perf_event__fprintf_namespaces(event, stdout);
if (thread == NULL ||
thread__set_namespaces(thread, sample->time, &event->namespaces)) {
dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
err = -1;
}
thread__put(thread);
return err;
}
int machine__process_cgroup_event(struct machine *machine,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
struct cgroup *cgrp;
if (dump_trace)
perf_event__fprintf_cgroup(event, stdout);
cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
if (cgrp == NULL)
return -ENOMEM;
return 0;
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
union perf_event *event, struct perf_sample *sample __maybe_unused)
{
dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
event->lost.id, event->lost.lost);
return 0;
}
int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
union perf_event *event, struct perf_sample *sample)
{
dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
sample->id, event->lost_samples.lost);
return 0;
}
static struct dso *machine__findnew_module_dso(struct machine *machine,
struct kmod_path *m,
const char *filename)
{
struct dso *dso;
down_write(&machine->dsos.lock);
dso = __dsos__find(&machine->dsos, m->name, true);
if (!dso) {
dso = __dsos__addnew(&machine->dsos, m->name);
if (dso == NULL)
goto out_unlock;
dso__set_module_info(dso, m, machine);
dso__set_long_name(dso, strdup(filename), true);
dso->kernel = DSO_SPACE__KERNEL;
}
dso__get(dso);
out_unlock:
up_write(&machine->dsos.lock);
return dso;
}
int machine__process_aux_event(struct machine *machine __maybe_unused,
union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_aux(event, stdout);
return 0;
}
int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_itrace_start(event, stdout);
return 0;
}
int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_aux_output_hw_id(event, stdout);
return 0;
}
int machine__process_switch_event(struct machine *machine __maybe_unused,
union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_switch(event, stdout);
return 0;
}
static int machine__process_ksymbol_register(struct machine *machine,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
struct symbol *sym;
struct dso *dso;
struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
bool put_map = false;
int err = 0;
if (!map) {
dso = dso__new(event->ksymbol.name);
if (!dso) {
err = -ENOMEM;
goto out;
}
dso->kernel = DSO_SPACE__KERNEL;
map = map__new2(0, dso);
dso__put(dso);
if (!map) {
err = -ENOMEM;
goto out;
}
/*
* The inserted map has a get on it, we need to put to release
* the reference count here, but do it after all accesses are
* done.
*/
put_map = true;
if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
dso->binary_type = DSO_BINARY_TYPE__OOL;
dso->data.file_size = event->ksymbol.len;
dso__set_loaded(dso);
}
map__set_start(map, event->ksymbol.addr);
map__set_end(map, map__start(map) + event->ksymbol.len);
err = maps__insert(machine__kernel_maps(machine), map);
if (err) {
err = -ENOMEM;
goto out;
}
dso__set_loaded(dso);
if (is_bpf_image(event->ksymbol.name)) {
dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
dso__set_long_name(dso, "", false);
}
} else {
dso = map__dso(map);
}
sym = symbol__new(map__map_ip(map, map__start(map)),
event->ksymbol.len,
0, 0, event->ksymbol.name);
if (!sym) {
err = -ENOMEM;
goto out;
}
dso__insert_symbol(dso, sym);
out:
if (put_map)
map__put(map);
return err;
}
static int machine__process_ksymbol_unregister(struct machine *machine,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
struct symbol *sym;
struct map *map;
map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
if (!map)
return 0;
if (RC_CHK_ACCESS(map) != RC_CHK_ACCESS(machine->vmlinux_map))
maps__remove(machine__kernel_maps(machine), map);
else {
struct dso *dso = map__dso(map);
sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
if (sym)
dso__delete_symbol(dso, sym);
}
return 0;
}
int machine__process_ksymbol(struct machine *machine __maybe_unused,
union perf_event *event,
struct perf_sample *sample)
{
if (dump_trace)
perf_event__fprintf_ksymbol(event, stdout);
if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
return machine__process_ksymbol_unregister(machine, event,
sample);
return machine__process_ksymbol_register(machine, event, sample);
}
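/*
 * Handle a PERF_RECORD_TEXT_POKE event by writing the new instruction bytes
 * into the data cache of the kernel dso that covers the poked address.
 */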
int machine__process_text_poke(struct machine *machine, union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
struct dso *dso = map ? map__dso(map) : NULL;
if (dump_trace)
perf_event__fprintf_text_poke(event, machine, stdout);
if (!event->text_poke.new_len)
return 0;
if (cpumode != PERF_RECORD_MISC_KERNEL) {
pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
return 0;
}
if (dso) {
u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
int ret;
/*
* Kernel maps might be changed when loading symbols so loading
* must be done prior to using kernel maps.
*/
map__load(map);
ret = dso__data_write_cache_addr(dso, map, machine,
event->text_poke.addr,
new_bytes,
event->text_poke.new_len);
if (ret != event->text_poke.new_len)
pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
event->text_poke.addr);
} else {
pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
event->text_poke.addr);
}
return 0;
}
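/*
 * Create a map for a kernel module loaded at @start and insert it into the
 * machine's kernel maps. Returns the new map, or NULL on failure.
 */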
static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
const char *filename)
{
struct map *map = NULL;
struct kmod_path m;
struct dso *dso;
int err;
if (kmod_path__parse_name(&m, filename))
return NULL;
dso = machine__findnew_module_dso(machine, &m, filename);
if (dso == NULL)
goto out;
map = map__new2(start, dso);
if (map == NULL)
goto out;
err = maps__insert(machine__kernel_maps(machine), map);
/* If maps__insert failed, return NULL. */
if (err) {
map__put(map);
map = NULL;
}
out:
/* put the dso here, corresponding to machine__findnew_module_dso */
dso__put(dso);
zfree(&m.name);
return map;
}
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
struct rb_node *nd;
size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += __dsos__fprintf(&pos->dsos.head, fp);
}
return ret;
}
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm)
{
return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}
size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm)
{
struct rb_node *nd;
size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
}
return ret;
}
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
int i;
size_t printed = 0;
struct dso *kdso = machine__kernel_dso(machine);
if (kdso->has_build_id) {
char filename[PATH_MAX];
if (dso__build_id_filename(kdso, filename, sizeof(filename),
false))
printed += fprintf(fp, "[0] %s\n", filename);
}
for (i = 0; i < vmlinux_path__nr_entries; ++i)
printed += fprintf(fp, "[%d] %s\n",
i + kdso->has_build_id, vmlinux_path[i]);
return printed;
}
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
struct rb_node *nd;
size_t ret;
int i;
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
down_read(&threads->lock);
ret = fprintf(fp, "Threads: %u\n", threads->nr);
for (nd = rb_first_cached(&threads->entries); nd;
nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
ret += thread__fprintf(pos, fp);
}
up_read(&threads->lock);
}
return ret;
}
static struct dso *machine__get_kernel(struct machine *machine)
{
const char *vmlinux_name = machine->mmap_name;
struct dso *kernel;
if (machine__is_host(machine)) {
if (symbol_conf.vmlinux_name)
vmlinux_name = symbol_conf.vmlinux_name;
kernel = machine__findnew_kernel(machine, vmlinux_name,
"[kernel]", DSO_SPACE__KERNEL);
} else {
if (symbol_conf.default_guest_vmlinux_name)
vmlinux_name = symbol_conf.default_guest_vmlinux_name;
kernel = machine__findnew_kernel(machine, vmlinux_name,
"[guest.kernel]",
DSO_SPACE__KERNEL_GUEST);
}
if (kernel != NULL && (!kernel->has_build_id))
dso__read_running_kernel_build_id(kernel, machine);
return kernel;
}
void machine__get_kallsyms_filename(struct machine *machine, char *buf,
size_t bufsz)
{
if (machine__is_default_guest(machine))
scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
else
scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
/* Figure out the start address of kernel map from /proc/kallsyms.
* Returns the name of the start symbol in *symbol_name. Pass in NULL as
* symbol_name if it's not that important.
*/
static int machine__get_running_kernel_start(struct machine *machine,
const char **symbol_name,
u64 *start, u64 *end)
{
char filename[PATH_MAX];
int i, err = -1;
const char *name;
u64 addr = 0;
machine__get_kallsyms_filename(machine, filename, PATH_MAX);
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return 0;
for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
err = kallsyms__get_function_start(filename, name, &addr);
if (!err)
break;
}
if (err)
return -1;
if (symbol_name)
*symbol_name = name;
*start = addr;
err = kallsyms__get_symbol_start(filename, "_edata", &addr);
if (err)
err = kallsyms__get_function_start(filename, "_etext", &addr);
if (!err)
*end = addr;
return 0;
}
int machine__create_extra_kernel_map(struct machine *machine,
struct dso *kernel,
struct extra_kernel_map *xm)
{
struct kmap *kmap;
struct map *map;
int err;
map = map__new2(xm->start, kernel);
if (!map)
return -ENOMEM;
map__set_end(map, xm->end);
map__set_pgoff(map, xm->pgoff);
kmap = map__kmap(map);
strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
err = maps__insert(machine__kernel_maps(machine), map);
if (!err) {
pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
kmap->name, map__start(map), map__end(map));
}
map__put(map);
return err;
}
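/*
 * Look up the start address of the entry trampoline symbol in the kernel dso,
 * trying all known aliases. Returns 0 if none is found.
 */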
static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so look up all aliases */
const char *syms[] = {
"_entry_trampoline",
"__entry_trampoline_start",
"entry_SYSCALL_64_trampoline",
};
struct symbol *sym = dso__first_symbol(dso);
unsigned int i;
for (; sym; sym = dso__next_symbol(sym)) {
if (sym->binding != STB_GLOBAL)
continue;
for (i = 0; i < ARRAY_SIZE(syms); i++) {
if (!strcmp(sym->name, syms[i]))
return sym->start;
}
}
return 0;
}
/*
* These values can be used for kernels that do not have symbols for the entry
* trampolines in kallsyms.
*/
#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
#define X86_64_ENTRY_TRAMPOLINE 0x6000
/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
struct dso *kernel)
{
struct maps *kmaps = machine__kernel_maps(machine);
int nr_cpus_avail, cpu;
bool found = false;
struct map_rb_node *rb_node;
u64 pgoff;
/*
* In the vmlinux case, pgoff is a virtual address which must now be
* mapped to a vmlinux offset.
*/
maps__for_each_entry(kmaps, rb_node) {
struct map *dest_map, *map = rb_node->map;
struct kmap *kmap = __map__kmap(map);
if (!kmap || !is_entry_trampoline(kmap->name))
continue;
dest_map = maps__find(kmaps, map__pgoff(map));
if (dest_map != map)
map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
found = true;
}
if (found || machine->trampolines_mapped)
return 0;
pgoff = find_entry_trampoline(kernel);
if (!pgoff)
return 0;
nr_cpus_avail = machine__nr_cpus_avail(machine);
/* Add a 1 page map for each CPU's entry trampoline */
for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
cpu * X86_64_CPU_ENTRY_AREA_SIZE +
X86_64_ENTRY_TRAMPOLINE;
struct extra_kernel_map xm = {
.start = va,
.end = va + page_size,
.pgoff = pgoff,
};
strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
return -1;
}
machine->trampolines_mapped = nr_cpus_avail;
return 0;
}
int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
struct dso *kernel __maybe_unused)
{
return 0;
}
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	/* In case of renewing the kernel map, destroy the previous one */
machine__destroy_kernel_maps(machine);
map__put(machine->vmlinux_map);
machine->vmlinux_map = map__new2(0, kernel);
if (machine->vmlinux_map == NULL)
return -ENOMEM;
map__set_map_ip(machine->vmlinux_map, identity__map_ip);
map__set_unmap_ip(machine->vmlinux_map, identity__map_ip);
return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
}
void machine__destroy_kernel_maps(struct machine *machine)
{
struct kmap *kmap;
struct map *map = machine__kernel_map(machine);
if (map == NULL)
return;
kmap = map__kmap(map);
maps__remove(machine__kernel_maps(machine), map);
if (kmap && kmap->ref_reloc_sym) {
zfree((char **)&kmap->ref_reloc_sym->name);
zfree(&kmap->ref_reloc_sym);
}
map__zput(machine->vmlinux_map);
}
int machines__create_guest_kernel_maps(struct machines *machines)
{
int ret = 0;
struct dirent **namelist = NULL;
int i, items = 0;
char path[PATH_MAX];
pid_t pid;
char *endp;
if (symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_modules ||
symbol_conf.default_guest_kallsyms) {
machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
}
if (symbol_conf.guestmount) {
items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
if (items <= 0)
return -ENOENT;
for (i = 0; i < items; i++) {
if (!isdigit(namelist[i]->d_name[0])) {
/* Filter out . and .. */
continue;
}
pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
if ((*endp != '\0') ||
(endp == namelist[i]->d_name) ||
(errno == ERANGE)) {
pr_debug("invalid directory (%s). Skipping.\n",
namelist[i]->d_name);
continue;
}
sprintf(path, "%s/%s/proc/kallsyms",
symbol_conf.guestmount,
namelist[i]->d_name);
ret = access(path, R_OK);
if (ret) {
pr_debug("Can't access file %s\n", path);
goto failure;
}
machines__create_kernel_maps(machines, pid);
}
failure:
free(namelist);
}
return ret;
}
void machines__destroy_kernel_maps(struct machines *machines)
{
struct rb_node *next = rb_first_cached(&machines->guests);
machine__destroy_kernel_maps(&machines->host);
while (next) {
struct machine *pos = rb_entry(next, struct machine, rb_node);
next = rb_next(&pos->rb_node);
rb_erase_cached(&pos->rb_node, &machines->guests);
machine__delete(pos);
}
}
int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
struct machine *machine = machines__findnew(machines, pid);
if (machine == NULL)
return -1;
return machine__create_kernel_maps(machine);
}
int machine__load_kallsyms(struct machine *machine, const char *filename)
{
struct map *map = machine__kernel_map(machine);
struct dso *dso = map__dso(map);
int ret = __dso__load_kallsyms(dso, filename, map, true);
if (ret > 0) {
dso__set_loaded(dso);
/*
* Since /proc/kallsyms will have multiple sessions for the
* kernel, with modules between them, fixup the end of all
* sections.
*/
maps__fixup_end(machine__kernel_maps(machine));
}
return ret;
}
int machine__load_vmlinux_path(struct machine *machine)
{
struct map *map = machine__kernel_map(machine);
struct dso *dso = map__dso(map);
int ret = dso__load_vmlinux_path(dso, map);
if (ret > 0)
dso__set_loaded(dso);
return ret;
}
static char *get_kernel_version(const char *root_dir)
{
char version[PATH_MAX];
FILE *file;
char *name, *tmp;
const char *prefix = "Linux version ";
sprintf(version, "%s/proc/version", root_dir);
file = fopen(version, "r");
if (!file)
return NULL;
tmp = fgets(version, sizeof(version), file);
fclose(file);
if (!tmp)
return NULL;
name = strstr(version, prefix);
if (!name)
return NULL;
name += strlen(prefix);
tmp = strchr(name, ' ');
if (tmp)
*tmp = '\0';
return strdup(name);
}
static bool is_kmod_dso(struct dso *dso)
{
return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}
static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
char *long_name;
struct dso *dso;
struct map *map = maps__find_by_name(maps, m->name);
if (map == NULL)
return 0;
long_name = strdup(path);
if (long_name == NULL)
return -ENOMEM;
dso = map__dso(map);
dso__set_long_name(dso, long_name, true);
dso__kernel_module_get_build_id(dso, "");
	/*
	 * The full name could reveal kmod compression, so
	 * update the symtab_type if needed.
	 */
if (m->comp && is_kmod_dso(dso)) {
dso->symtab_type++;
dso->comp = m->comp;
}
return 0;
}
static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
{
struct dirent *dent;
DIR *dir = opendir(dir_name);
int ret = 0;
if (!dir) {
pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
return -1;
}
while ((dent = readdir(dir)) != NULL) {
char path[PATH_MAX];
struct stat st;
		/* sshfs might return bad dent->d_type, so we have to stat */
path__join(path, sizeof(path), dir_name, dent->d_name);
if (stat(path, &st))
continue;
if (S_ISDIR(st.st_mode)) {
if (!strcmp(dent->d_name, ".") ||
!strcmp(dent->d_name, ".."))
continue;
/* Do not follow top-level source and build symlinks */
if (depth == 0) {
if (!strcmp(dent->d_name, "source") ||
!strcmp(dent->d_name, "build"))
continue;
}
ret = maps__set_modules_path_dir(maps, path, depth + 1);
if (ret < 0)
goto out;
} else {
struct kmod_path m;
ret = kmod_path__parse_name(&m, dent->d_name);
if (ret)
goto out;
if (m.kmod)
ret = maps__set_module_path(maps, path, &m);
zfree(&m.name);
if (ret)
goto out;
}
}
out:
closedir(dir);
return ret;
}
static int machine__set_modules_path(struct machine *machine)
{
char *version;
char modules_path[PATH_MAX];
version = get_kernel_version(machine->root_dir);
if (!version)
return -1;
snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
machine->root_dir, version);
free(version);
return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
}
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
u64 *size __maybe_unused,
const char *name __maybe_unused)
{
return 0;
}
static int machine__create_module(void *arg, const char *name, u64 start,
u64 size)
{
struct machine *machine = arg;
struct map *map;
if (arch__fix_module_text_start(&start, &size, name) < 0)
return -1;
map = machine__addnew_module_map(machine, start, name);
if (map == NULL)
return -1;
map__set_end(map, start + size);
dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
map__put(map);
return 0;
}
static int machine__create_modules(struct machine *machine)
{
const char *modules;
char path[PATH_MAX];
if (machine__is_default_guest(machine)) {
modules = symbol_conf.default_guest_modules;
} else {
snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
modules = path;
}
if (symbol__restricted_filename(modules, "/proc/modules"))
return -1;
if (modules__parse(modules, machine, machine__create_module))
return -1;
if (!machine__set_modules_path(machine))
return 0;
pr_debug("Problems setting modules path maps, continuing anyway...\n");
return 0;
}
static void machine__set_kernel_mmap(struct machine *machine,
u64 start, u64 end)
{
map__set_start(machine->vmlinux_map, start);
map__set_end(machine->vmlinux_map, end);
/*
* Be a bit paranoid here, some perf.data file came with
* a zero sized synthesized MMAP event for the kernel.
*/
if (start == 0 && end == 0)
map__set_end(machine->vmlinux_map, ~0ULL);
}
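/*
 * Re-insert the vmlinux map with its updated address range so the kernel maps
 * stay correctly sorted.
 */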
static int machine__update_kernel_mmap(struct machine *machine,
u64 start, u64 end)
{
struct map *orig, *updated;
int err;
orig = machine->vmlinux_map;
updated = map__get(orig);
machine->vmlinux_map = updated;
machine__set_kernel_mmap(machine, start, end);
maps__remove(machine__kernel_maps(machine), orig);
err = maps__insert(machine__kernel_maps(machine), updated);
map__put(orig);
return err;
}
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
const char *name = NULL;
u64 start = 0, end = ~0ULL;
int ret;
if (kernel == NULL)
return -1;
ret = __machine__create_kernel_maps(machine, kernel);
if (ret < 0)
goto out_put;
if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
if (machine__is_host(machine))
pr_debug("Problems creating module maps, "
"continuing anyway...\n");
else
pr_debug("Problems creating module maps for guest %d, "
"continuing anyway...\n", machine->pid);
}
if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
if (name &&
map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
machine__destroy_kernel_maps(machine);
ret = -1;
goto out_put;
}
		/*
		 * We have a real start address now, so re-order the kmaps,
		 * assuming the kernel map is the last one in the kmaps.
		 */
ret = machine__update_kernel_mmap(machine, start, end);
if (ret < 0)
goto out_put;
}
if (machine__create_extra_kernel_maps(machine, kernel))
pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
if (end == ~0ULL) {
/* update end address of the kernel map using adjacent module address */
struct map_rb_node *rb_node = maps__find_node(machine__kernel_maps(machine),
machine__kernel_map(machine));
struct map_rb_node *next = map_rb_node__next(rb_node);
if (next)
machine__set_kernel_mmap(machine, start, map__start(next->map));
}
out_put:
dso__put(kernel);
return ret;
}
static bool machine__uses_kcore(struct machine *machine)
{
struct dso *dso;
list_for_each_entry(dso, &machine->dsos.head, node) {
if (dso__is_kcore(dso))
return true;
}
return false;
}
static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
struct extra_kernel_map *xm)
{
return machine__is(machine, "x86_64") &&
is_entry_trampoline(xm->name);
}
static int machine__process_extra_kernel_map(struct machine *machine,
struct extra_kernel_map *xm)
{
struct dso *kernel = machine__kernel_dso(machine);
if (kernel == NULL)
return -1;
return machine__create_extra_kernel_map(machine, kernel, xm);
}
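/*
 * Handle an mmap event for kernel space: module and special maps get a new
 * module map, while the main kernel mmap (re)creates the kernel maps and sets
 * up the ref reloc symbol.
 */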
static int machine__process_kernel_mmap_event(struct machine *machine,
struct extra_kernel_map *xm,
struct build_id *bid)
{
enum dso_space_type dso_space;
bool is_kernel_mmap;
const char *mmap_name = machine->mmap_name;
/* If we have maps from kcore then we do not need or want any others */
if (machine__uses_kcore(machine))
return 0;
if (machine__is_host(machine))
dso_space = DSO_SPACE__KERNEL;
else
dso_space = DSO_SPACE__KERNEL_GUEST;
is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
if (!is_kernel_mmap && !machine__is_host(machine)) {
/*
* If the event was recorded inside the guest and injected into
* the host perf.data file, then it will match a host mmap_name,
* so try that - see machine__set_mmap_name().
*/
mmap_name = "[kernel.kallsyms]";
is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
}
if (xm->name[0] == '/' ||
(!is_kernel_mmap && xm->name[0] == '[')) {
struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);
if (map == NULL)
goto out_problem;
map__set_end(map, map__start(map) + xm->end - xm->start);
if (build_id__is_defined(bid))
dso__set_build_id(map__dso(map), bid);
map__put(map);
} else if (is_kernel_mmap) {
const char *symbol_name = xm->name + strlen(mmap_name);
/*
* Should be there already, from the build-id table in
* the header.
*/
struct dso *kernel = NULL;
struct dso *dso;
down_read(&machine->dsos.lock);
list_for_each_entry(dso, &machine->dsos.head, node) {
			/*
			 * The cpumode passed to is_kernel_module() is not the
			 * cpumode of *this* event. If we insisted on passing
			 * the correct cpumode, we would have to record it when
			 * adding this dso to the linked list.
			 *
			 * However, we do not really need the correct cpumode
			 * here. We know it must be kernel mode (otherwise the
			 * dso would not have been linked onto the kernel_dsos
			 * list).
			 *
			 * Therefore, pass PERF_RECORD_MISC_CPUMODE_UNKNOWN;
			 * is_kernel_module() treats it as a kernel cpumode.
			 */
if (!dso->kernel ||
is_kernel_module(dso->long_name,
PERF_RECORD_MISC_CPUMODE_UNKNOWN))
continue;
kernel = dso__get(dso);
break;
}
up_read(&machine->dsos.lock);
if (kernel == NULL)
kernel = machine__findnew_dso(machine, machine->mmap_name);
if (kernel == NULL)
goto out_problem;
kernel->kernel = dso_space;
if (__machine__create_kernel_maps(machine, kernel) < 0) {
dso__put(kernel);
goto out_problem;
}
if (strstr(kernel->long_name, "vmlinux"))
dso__set_short_name(kernel, "[kernel.vmlinux]", false);
if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
dso__put(kernel);
goto out_problem;
}
if (build_id__is_defined(bid))
dso__set_build_id(kernel, bid);
/*
* Avoid using a zero address (kptr_restrict) for the ref reloc
* symbol. Effectively having zero here means that at record
* time /proc/sys/kernel/kptr_restrict was non zero.
*/
if (xm->pgoff != 0) {
map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
symbol_name,
xm->pgoff);
}
if (machine__is_default_guest(machine)) {
/*
* preload dso of guest kernel and modules
*/
dso__load(kernel, machine__kernel_map(machine));
}
dso__put(kernel);
} else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
return machine__process_extra_kernel_map(machine, xm);
}
return 0;
out_problem:
return -1;
}
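/*
 * Handle a PERF_RECORD_MMAP2 event: kernel-space mmaps are routed to
 * machine__process_kernel_mmap_event(), user-space mmaps create a new map in
 * the owning thread's address space.
 */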
int machine__process_mmap2_event(struct machine *machine,
union perf_event *event,
struct perf_sample *sample)
{
struct thread *thread;
struct map *map;
struct dso_id dso_id = {
.maj = event->mmap2.maj,
.min = event->mmap2.min,
.ino = event->mmap2.ino,
.ino_generation = event->mmap2.ino_generation,
};
struct build_id __bid, *bid = NULL;
int ret = 0;
if (dump_trace)
perf_event__fprintf_mmap2(event, stdout);
if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
bid = &__bid;
build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
}
if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
sample->cpumode == PERF_RECORD_MISC_KERNEL) {
struct extra_kernel_map xm = {
.start = event->mmap2.start,
.end = event->mmap2.start + event->mmap2.len,
.pgoff = event->mmap2.pgoff,
};
strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
ret = machine__process_kernel_mmap_event(machine, &xm, bid);
if (ret < 0)
goto out_problem;
return 0;
}
thread = machine__findnew_thread(machine, event->mmap2.pid,
event->mmap2.tid);
if (thread == NULL)
goto out_problem;
map = map__new(machine, event->mmap2.start,
event->mmap2.len, event->mmap2.pgoff,
&dso_id, event->mmap2.prot,
event->mmap2.flags, bid,
event->mmap2.filename, thread);
if (map == NULL)
goto out_problem_map;
ret = thread__insert_map(thread, map);
if (ret)
goto out_problem_insert;
thread__put(thread);
map__put(map);
return 0;
out_problem_insert:
map__put(map);
out_problem_map:
thread__put(thread);
out_problem:
dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
return 0;
}
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
struct thread *thread;
struct map *map;
u32 prot = 0;
int ret = 0;
if (dump_trace)
perf_event__fprintf_mmap(event, stdout);
if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
sample->cpumode == PERF_RECORD_MISC_KERNEL) {
struct extra_kernel_map xm = {
.start = event->mmap.start,
.end = event->mmap.start + event->mmap.len,
.pgoff = event->mmap.pgoff,
};
strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
if (ret < 0)
goto out_problem;
return 0;
}
thread = machine__findnew_thread(machine, event->mmap.pid,
event->mmap.tid);
if (thread == NULL)
goto out_problem;
if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
prot = PROT_EXEC;
map = map__new(machine, event->mmap.start,
event->mmap.len, event->mmap.pgoff,
NULL, prot, 0, NULL, event->mmap.filename, thread);
if (map == NULL)
goto out_problem_map;
ret = thread__insert_map(thread, map);
if (ret)
goto out_problem_insert;
thread__put(thread);
map__put(map);
return 0;
out_problem_insert:
map__put(map);
out_problem_map:
thread__put(thread);
out_problem:
dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
return 0;
}
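/*
 * Unlink a thread from the machine's thread tree and drop the tree's
 * reference on it. Takes the threads lock unless the caller already holds it.
 */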
static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
struct thread *th, bool lock)
{
struct threads *threads = machine__threads(machine, thread__tid(th));
if (!nd)
nd = thread_rb_node__find(th, &threads->entries.rb_root);
if (threads->last_match && RC_CHK_ACCESS(threads->last_match) == RC_CHK_ACCESS(th))
threads__set_last_match(threads, NULL);
if (lock)
down_write(&threads->lock);
BUG_ON(refcount_read(thread__refcnt(th)) == 0);
thread__put(nd->thread);
rb_erase_cached(&nd->rb_node, &threads->entries);
RB_CLEAR_NODE(&nd->rb_node);
--threads->nr;
free(nd);
if (lock)
up_write(&threads->lock);
}
void machine__remove_thread(struct machine *machine, struct thread *th)
{
return __machine__remove_thread(machine, NULL, th, true);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
struct thread *thread = machine__find_thread(machine,
event->fork.pid,
event->fork.tid);
struct thread *parent = machine__findnew_thread(machine,
event->fork.ppid,
event->fork.ptid);
bool do_maps_clone = true;
int err = 0;
if (dump_trace)
perf_event__fprintf_task(event, stdout);
/*
* There may be an existing thread that is not actually the parent,
* either because we are processing events out of order, or because the
* (fork) event that would have removed the thread was lost. Assume the
* latter case and continue on as best we can.
*/
if (thread__pid(parent) != (pid_t)event->fork.ppid) {
dump_printf("removing erroneous parent thread %d/%d\n",
thread__pid(parent), thread__tid(parent));
machine__remove_thread(machine, parent);
thread__put(parent);
parent = machine__findnew_thread(machine, event->fork.ppid,
event->fork.ptid);
}
/* if a thread currently exists for the thread id remove it */
if (thread != NULL) {
machine__remove_thread(machine, thread);
thread__put(thread);
}
thread = machine__findnew_thread(machine, event->fork.pid,
event->fork.tid);
/*
* When synthesizing FORK events, we are trying to create thread
* objects for the already running tasks on the machine.
*
* Normally, for a kernel FORK event, we want to clone the parent's
* maps because that is what the kernel just did.
*
* But when synthesizing, this should not be done. If we do, we end up
* with overlapping maps as we process the synthesized MMAP2 events that
* get delivered shortly thereafter.
*
* Use the FORK event misc flags in an internal way to signal this
* situation, so we can elide the map clone when appropriate.
*/
if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
do_maps_clone = false;
if (thread == NULL || parent == NULL ||
thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
err = -1;
}
thread__put(thread);
thread__put(parent);
return err;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
struct thread *thread = machine__find_thread(machine,
event->fork.pid,
event->fork.tid);
if (dump_trace)
perf_event__fprintf_task(event, stdout);
if (thread != NULL)
thread__put(thread);
return 0;
}
int machine__process_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
int ret;
switch (event->header.type) {
case PERF_RECORD_COMM:
ret = machine__process_comm_event(machine, event, sample); break;
case PERF_RECORD_MMAP:
ret = machine__process_mmap_event(machine, event, sample); break;
case PERF_RECORD_NAMESPACES:
ret = machine__process_namespaces_event(machine, event, sample); break;
case PERF_RECORD_CGROUP:
ret = machine__process_cgroup_event(machine, event, sample); break;
case PERF_RECORD_MMAP2:
ret = machine__process_mmap2_event(machine, event, sample); break;
case PERF_RECORD_FORK:
ret = machine__process_fork_event(machine, event, sample); break;
case PERF_RECORD_EXIT:
ret = machine__process_exit_event(machine, event, sample); break;
case PERF_RECORD_LOST:
ret = machine__process_lost_event(machine, event, sample); break;
case PERF_RECORD_AUX:
ret = machine__process_aux_event(machine, event); break;
case PERF_RECORD_ITRACE_START:
ret = machine__process_itrace_start_event(machine, event); break;
case PERF_RECORD_LOST_SAMPLES:
ret = machine__process_lost_samples_event(machine, event, sample); break;
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
ret = machine__process_switch_event(machine, event); break;
case PERF_RECORD_KSYMBOL:
ret = machine__process_ksymbol(machine, event, sample); break;
case PERF_RECORD_BPF_EVENT:
ret = machine__process_bpf(machine, event, sample); break;
case PERF_RECORD_TEXT_POKE:
ret = machine__process_text_poke(machine, event, sample); break;
case PERF_RECORD_AUX_OUTPUT_HW_ID:
ret = machine__process_aux_output_hw_id_event(machine, event); break;
default:
ret = -1;
break;
}
return ret;
}
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
if (!regexec(regex, sym->name, 0, NULL, 0))
return true;
return false;
}
static void ip__resolve_ams(struct thread *thread,
struct addr_map_symbol *ams,
u64 ip)
{
struct addr_location al;
addr_location__init(&al);
/*
* We cannot use the header.misc hint to determine whether a
* branch stack address is user, kernel, guest, hypervisor.
* Branches may straddle the kernel/user/hypervisor boundaries.
* Thus, we have to try consecutively until we find a match
* or else, the symbol is unknown
*/
thread__find_cpumode_addr_location(thread, ip, &al);
ams->addr = ip;
ams->al_addr = al.addr;
ams->al_level = al.level;
ams->ms.maps = maps__get(al.maps);
ams->ms.sym = al.sym;
ams->ms.map = map__get(al.map);
ams->phys_addr = 0;
ams->data_page_size = 0;
addr_location__exit(&al);
}
static void ip__resolve_data(struct thread *thread,
u8 m, struct addr_map_symbol *ams,
u64 addr, u64 phys_addr, u64 daddr_page_size)
{
struct addr_location al;
addr_location__init(&al);
thread__find_symbol(thread, m, addr, &al);
ams->addr = addr;
ams->al_addr = al.addr;
ams->al_level = al.level;
ams->ms.maps = maps__get(al.maps);
ams->ms.sym = al.sym;
ams->ms.map = map__get(al.map);
ams->phys_addr = phys_addr;
ams->data_page_size = daddr_page_size;
addr_location__exit(&al);
}
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
struct addr_location *al)
{
struct mem_info *mi = mem_info__new();
if (!mi)
return NULL;
ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
sample->addr, sample->phys_addr,
sample->data_page_size);
mi->data_src.val = sample->data_src;
return mi;
}
static char *callchain_srcline(struct map_symbol *ms, u64 ip)
{
struct map *map = ms->map;
char *srcline = NULL;
struct dso *dso;
if (!map || callchain_param.key == CCKEY_FUNCTION)
return srcline;
dso = map__dso(map);
srcline = srcline__tree_find(&dso->srclines, ip);
if (!srcline) {
bool show_sym = false;
bool show_addr = callchain_param.key == CCKEY_ADDRESS;
srcline = get_srcline(dso, map__rip_2objdump(map, ip),
ms->sym, show_sym, show_addr, ip);
srcline__tree_insert(&dso->srclines, ip, srcline);
}
return srcline;
}
struct iterations {
int nr_loop_iter;
u64 cycles;
};
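/*
 * Resolve one callchain entry to a map/symbol and append it to the cursor.
 * PERF_CONTEXT_* marker values only switch the current cpumode and are not
 * appended themselves.
 */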
static int add_callchain_ip(struct thread *thread,
struct callchain_cursor *cursor,
struct symbol **parent,
struct addr_location *root_al,
u8 *cpumode,
u64 ip,
bool branch,
struct branch_flags *flags,
struct iterations *iter,
u64 branch_from)
{
struct map_symbol ms = {};
struct addr_location al;
int nr_loop_iter = 0, err = 0;
u64 iter_cycles = 0;
const char *srcline = NULL;
addr_location__init(&al);
al.filtered = 0;
al.sym = NULL;
al.srcline = NULL;
if (!cpumode) {
thread__find_cpumode_addr_location(thread, ip, &al);
} else {
if (ip >= PERF_CONTEXT_MAX) {
switch (ip) {
case PERF_CONTEXT_HV:
*cpumode = PERF_RECORD_MISC_HYPERVISOR;
break;
case PERF_CONTEXT_KERNEL:
*cpumode = PERF_RECORD_MISC_KERNEL;
break;
case PERF_CONTEXT_USER:
*cpumode = PERF_RECORD_MISC_USER;
break;
default:
pr_debug("invalid callchain context: "
"%"PRId64"\n", (s64) ip);
/*
* It seems the callchain is corrupted.
* Discard all.
*/
callchain_cursor_reset(cursor);
err = 1;
goto out;
}
goto out;
}
thread__find_symbol(thread, *cpumode, ip, &al);
}
if (al.sym != NULL) {
if (perf_hpp_list.parent && !*parent &&
symbol__match_regex(al.sym, &parent_regex))
*parent = al.sym;
else if (have_ignore_callees && root_al &&
symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/*
			 * Treat this symbol as the root,
			 * forgetting its callees.
			 */
addr_location__copy(root_al, &al);
callchain_cursor_reset(cursor);
}
}
if (symbol_conf.hide_unresolved && al.sym == NULL)
goto out;
if (iter) {
nr_loop_iter = iter->nr_loop_iter;
iter_cycles = iter->cycles;
}
ms.maps = maps__get(al.maps);
ms.map = map__get(al.map);
ms.sym = al.sym;
srcline = callchain_srcline(&ms, al.addr);
err = callchain_cursor_append(cursor, ip, &ms,
branch, flags, nr_loop_iter,
iter_cycles, branch_from, srcline);
out:
addr_location__exit(&al);
maps__put(ms.maps);
map__put(ms.map);
return err;
}
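/*
 * Resolve every branch stack entry of the sample to from/to map/symbol pairs.
 * The caller owns the returned array.
 */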
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
struct addr_location *al)
{
unsigned int i;
const struct branch_stack *bs = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
if (!bi)
return NULL;
for (i = 0; i < bs->nr; i++) {
ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
bi[i].flags = entries[i].flags;
}
return bi;
}
static void save_iterations(struct iterations *iter,
struct branch_entry *be, int nr)
{
int i;
iter->nr_loop_iter++;
iter->cycles = 0;
for (i = 0; i < nr; i++)
iter->cycles += be[i].flags.cycles;
}
#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff
#define PERF_MAX_BRANCH_DEPTH 127
/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr,
struct iterations *iter)
{
int i, j, off;
unsigned char chash[CHASHSZ];
memset(chash, NO_ENTRY, sizeof(chash));
BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
for (i = 0; i < nr; i++) {
int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
/* no collision handling for now */
if (chash[h] == NO_ENTRY) {
chash[h] = i;
} else if (l[chash[h]].from == l[i].from) {
bool is_loop = true;
/* check if it is a real loop */
off = 0;
for (j = chash[h]; j < i && i + off < nr; j++, off++)
if (l[j].from != l[i + off].from) {
is_loop = false;
break;
}
if (is_loop) {
j = nr - (i + off);
if (j > 0) {
save_iterations(iter + i + off,
l + i, off);
memmove(iter + i, iter + i + off,
j * sizeof(*iter));
memmove(l + i, l + i + off,
j * sizeof(*l));
}
nr -= off;
}
}
}
return nr;
}
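/*
 * Add the kernel part of the sample callchain (entries 0..end) to the cursor,
 * in callee or caller order depending on @callee.
 */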
static int lbr_callchain_add_kernel_ip(struct thread *thread,
struct callchain_cursor *cursor,
struct perf_sample *sample,
struct symbol **parent,
struct addr_location *root_al,
u64 branch_from,
bool callee, int end)
{
struct ip_callchain *chain = sample->callchain;
u8 cpumode = PERF_RECORD_MISC_USER;
int err, i;
if (callee) {
for (i = 0; i < end + 1; i++) {
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, chain->ips[i],
false, NULL, NULL, branch_from);
if (err)
return err;
}
return 0;
}
for (i = end; i >= 0; i--) {
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, chain->ips[i],
false, NULL, NULL, branch_from);
if (err)
return err;
}
return 0;
}
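/*
 * Remember the cursor node that was just appended for LBR entry @idx so it
 * can later be stitched onto a following sample's callchain.
 */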
static void save_lbr_cursor_node(struct thread *thread,
struct callchain_cursor *cursor,
int idx)
{
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
if (!lbr_stitch)
return;
if (cursor->pos == cursor->nr) {
lbr_stitch->prev_lbr_cursor[idx].valid = false;
return;
}
if (!cursor->curr)
cursor->curr = cursor->first;
else
cursor->curr = cursor->curr->next;
memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
sizeof(struct callchain_cursor_node));
lbr_stitch->prev_lbr_cursor[idx].valid = true;
cursor->pos++;
}
static int lbr_callchain_add_lbr_ip(struct thread *thread,
struct callchain_cursor *cursor,
struct perf_sample *sample,
struct symbol **parent,
struct addr_location *root_al,
u64 *branch_from,
bool callee)
{
struct branch_stack *lbr_stack = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
u8 cpumode = PERF_RECORD_MISC_USER;
int lbr_nr = lbr_stack->nr;
struct branch_flags *flags;
int err, i;
u64 ip;
	/*
	 * curr and pos are not used while writing the session; they are
	 * cleared in callchain_cursor_commit() when the writing session is
	 * closed. Use curr and pos here to track the current cursor node.
	 */
if (thread__lbr_stitch(thread)) {
cursor->curr = NULL;
cursor->pos = cursor->nr;
if (cursor->nr) {
cursor->curr = cursor->first;
for (i = 0; i < (int)(cursor->nr - 1); i++)
cursor->curr = cursor->curr->next;
}
}
if (callee) {
/* Add LBR ip from first entries.to */
ip = entries[0].to;
flags = &entries[0].flags;
*branch_from = entries[0].from;
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip,
true, flags, NULL,
*branch_from);
if (err)
return err;
		/*
		 * The number of cursor nodes has increased, so move the
		 * current cursor node on. There is no need to save the cursor
		 * node for entry 0, because it is impossible to stitch the
		 * whole LBR stack of the previous sample.
		 */
if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
if (!cursor->curr)
cursor->curr = cursor->first;
else
cursor->curr = cursor->curr->next;
cursor->pos++;
}
/* Add LBR ip from entries.from one by one. */
for (i = 0; i < lbr_nr; i++) {
ip = entries[i].from;
flags = &entries[i].flags;
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip,
true, flags, NULL,
*branch_from);
if (err)
return err;
save_lbr_cursor_node(thread, cursor, i);
}
return 0;
}
/* Add LBR ip from entries.from one by one. */
for (i = lbr_nr - 1; i >= 0; i--) {
ip = entries[i].from;
flags = &entries[i].flags;
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip,
true, flags, NULL,
*branch_from);
if (err)
return err;
save_lbr_cursor_node(thread, cursor, i);
}
/* Add LBR ip from first entries.to */
ip = entries[0].to;
flags = &entries[0].flags;
*branch_from = entries[0].from;
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip,
true, flags, NULL,
*branch_from);
if (err)
return err;
return 0;
}
static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
struct callchain_cursor *cursor)
{
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
struct callchain_cursor_node *cnode;
struct stitch_list *stitch_node;
int err;
list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
cnode = &stitch_node->cursor;
err = callchain_cursor_append(cursor, cnode->ip,
&cnode->ms,
cnode->branch,
&cnode->branch_flags,
cnode->nr_loop_iter,
cnode->iter_cycles,
cnode->branch_from,
cnode->srcline);
if (err)
return err;
}
return 0;
}
static struct stitch_list *get_stitch_node(struct thread *thread)
{
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
struct stitch_list *stitch_node;
if (!list_empty(&lbr_stitch->free_lists)) {
stitch_node = list_first_entry(&lbr_stitch->free_lists,
struct stitch_list, node);
list_del(&stitch_node->node);
return stitch_node;
}
return malloc(sizeof(struct stitch_list));
}
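/*
 * Check whether the bottom of the current sample's LBR stack overlaps the
 * previous sample's LBRs. If so, save the non-overlapping previous entries on
 * lbr_stitch->lists so they can be stitched onto this callchain.
 */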
static bool has_stitched_lbr(struct thread *thread,
struct perf_sample *cur,
struct perf_sample *prev,
unsigned int max_lbr,
bool callee)
{
struct branch_stack *cur_stack = cur->branch_stack;
struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
struct branch_stack *prev_stack = prev->branch_stack;
struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
int i, j, nr_identical_branches = 0;
struct stitch_list *stitch_node;
u64 cur_base, distance;
if (!cur_stack || !prev_stack)
return false;
/* Find the physical index of the base-of-stack for current sample. */
cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
(max_lbr + prev_stack->hw_idx - cur_base);
/* Previous sample has shorter stack. Nothing can be stitched. */
if (distance + 1 > prev_stack->nr)
return false;
/*
* Check if there are identical LBRs between two samples.
* Identical LBRs must have same from, to and flags values. Also,
* they have to be saved in the same LBR registers (same physical
* index).
*
* Starts from the base-of-stack of current sample.
*/
for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
if ((prev_entries[i].from != cur_entries[j].from) ||
(prev_entries[i].to != cur_entries[j].to) ||
(prev_entries[i].flags.value != cur_entries[j].flags.value))
break;
nr_identical_branches++;
}
if (!nr_identical_branches)
return false;
/*
* Save the LBRs between the base-of-stack of previous sample
* and the base-of-stack of current sample into lbr_stitch->lists.
* These LBRs will be stitched later.
*/
for (i = prev_stack->nr - 1; i > (int)distance; i--) {
if (!lbr_stitch->prev_lbr_cursor[i].valid)
continue;
stitch_node = get_stitch_node(thread);
if (!stitch_node)
return false;
memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
sizeof(struct callchain_cursor_node));
if (callee)
list_add(&stitch_node->node, &lbr_stitch->lists);
else
list_add_tail(&stitch_node->node, &lbr_stitch->lists);
}
return true;
}
static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
{
if (thread__lbr_stitch(thread))
return true;
thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
if (!thread__lbr_stitch(thread))
goto err;
thread__lbr_stitch(thread)->prev_lbr_cursor =
calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
goto free_lbr_stitch;
INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
return true;
free_lbr_stitch:
free(thread__lbr_stitch(thread));
thread__set_lbr_stitch(thread, NULL);
err:
pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
thread__set_lbr_stitch_enable(thread, false);
return false;
}
/*
 * Resolve an LBR callstack chain sample.
 * Return:
 * 1 - successfully got LBR callchain information
 * 0 - no LBR callchain information available, the caller should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
struct callchain_cursor *cursor,
struct perf_sample *sample,
struct symbol **parent,
struct addr_location *root_al,
int max_stack,
unsigned int max_lbr)
{
bool callee = (callchain_param.order == ORDER_CALLEE);
struct ip_callchain *chain = sample->callchain;
int chain_nr = min(max_stack, (int)chain->nr), i;
struct lbr_stitch *lbr_stitch;
bool stitched_lbr = false;
u64 branch_from = 0;
int err;
for (i = 0; i < chain_nr; i++) {
if (chain->ips[i] == PERF_CONTEXT_USER)
break;
}
/* LBR only affects the user callchain */
if (i == chain_nr)
return 0;
if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
(max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
lbr_stitch = thread__lbr_stitch(thread);
stitched_lbr = has_stitched_lbr(thread, sample,
&lbr_stitch->prev_sample,
max_lbr, callee);
if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
list_replace_init(&lbr_stitch->lists,
&lbr_stitch->free_lists);
}
memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
}
if (callee) {
/* Add kernel ip */
err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
parent, root_al, branch_from,
true, i);
if (err)
goto error;
err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
root_al, &branch_from, true);
if (err)
goto error;
if (stitched_lbr) {
err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
if (err)
goto error;
}
} else {
if (stitched_lbr) {
err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
if (err)
goto error;
}
err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
root_al, &branch_from, false);
if (err)
goto error;
/* Add kernel ip */
err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
parent, root_al, branch_from,
false, i);
if (err)
goto error;
}
return 1;
error:
return (err < 0) ? err : 0;
}
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
struct callchain_cursor *cursor,
struct symbol **parent,
struct addr_location *root_al,
u8 *cpumode, int ent)
{
int err = 0;
while (--ent >= 0) {
u64 ip = chain->ips[ent];
if (ip >= PERF_CONTEXT_MAX) {
err = add_callchain_ip(thread, cursor, parent,
root_al, cpumode, ip,
false, NULL, NULL, 0);
break;
}
}
return err;
}
static u64 get_leaf_frame_caller(struct perf_sample *sample,
struct thread *thread, int usr_idx)
{
if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
else
return 0;
}
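/*
 * Build the callchain cursor for a sample: try LBR callstacks first, add
 * branch stack entries when requested, then walk the kernel-supplied ip
 * callchain.
 */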
static int thread__resolve_callchain_sample(struct thread *thread,
struct callchain_cursor *cursor,
struct evsel *evsel,
struct perf_sample *sample,
struct symbol **parent,
struct addr_location *root_al,
int max_stack)
{
struct branch_stack *branch = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
struct ip_callchain *chain = sample->callchain;
int chain_nr = 0;
u8 cpumode = PERF_RECORD_MISC_USER;
int i, j, err, nr_entries, usr_idx;
int skip_idx = -1;
int first_call = 0;
u64 leaf_frame_caller;
if (chain)
chain_nr = chain->nr;
if (evsel__has_branch_callstack(evsel)) {
struct perf_env *env = evsel__env(evsel);
err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
root_al, max_stack,
!env ? 0 : env->max_branches);
if (err)
return (err < 0) ? err : 0;
}
/*
* Based on DWARF debug information, some architectures skip
* a callchain entry saved by the kernel.
*/
skip_idx = arch_skip_callchain_idx(thread, chain);
/*
* Add branches to call stack for easier browsing. This gives
* more context for a sample than just the callers.
*
* This uses individual histograms of paths compared to the
* aggregated histograms the normal LBR mode uses.
*
* Limitations for now:
* - No extra filters
* - No annotations (should annotate somehow)
*/
if (branch && callchain_param.branch_callstack) {
int nr = min(max_stack, (int)branch->nr);
struct branch_entry be[nr];
struct iterations iter[nr];
if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
pr_warning("corrupted branch chain. skipping...\n");
goto check_calls;
}
for (i = 0; i < nr; i++) {
if (callchain_param.order == ORDER_CALLEE) {
be[i] = entries[i];
if (chain == NULL)
continue;
/*
* Check for overlap into the callchain.
* The return address is one off compared to
* the branch entry. To adjust for this
* assume the calling instruction is not longer
* than 8 bytes.
*/
if (i == skip_idx ||
chain->ips[first_call] >= PERF_CONTEXT_MAX)
first_call++;
else if (be[i].from < chain->ips[first_call] &&
be[i].from >= chain->ips[first_call] - 8)
first_call++;
} else
be[i] = entries[branch->nr - i - 1];
}
memset(iter, 0, sizeof(struct iterations) * nr);
nr = remove_loops(be, nr, iter);
for (i = 0; i < nr; i++) {
err = add_callchain_ip(thread, cursor, parent,
root_al,
NULL, be[i].to,
true, &be[i].flags,
NULL, be[i].from);
if (!err)
err = add_callchain_ip(thread, cursor, parent, root_al,
NULL, be[i].from,
true, &be[i].flags,
&iter[i], 0);
if (err == -EINVAL)
break;
if (err)
return err;
}
if (chain_nr == 0)
return 0;
chain_nr -= nr;
}
check_calls:
if (chain && callchain_param.order != ORDER_CALLEE) {
err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
&cpumode, chain->nr - first_call);
if (err)
return (err < 0) ? err : 0;
}
for (i = first_call, nr_entries = 0;
i < chain_nr && nr_entries < max_stack; i++) {
u64 ip;
if (callchain_param.order == ORDER_CALLEE)
j = i;
else
j = chain->nr - i - 1;
#ifdef HAVE_SKIP_CALLCHAIN_IDX
if (j == skip_idx)
continue;
#endif
ip = chain->ips[j];
if (ip < PERF_CONTEXT_MAX)
++nr_entries;
else if (callchain_param.order != ORDER_CALLEE) {
err = find_prev_cpumode(chain, thread, cursor, parent,
root_al, &cpumode, j);
if (err)
return (err < 0) ? err : 0;
continue;
}
/*
* PERF_CONTEXT_USER allows us to locate where the user stack ends.
* Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
* the index will be different in order to add the missing frame
* at the right place.
*/
usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1;
if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {
leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
			/*
			 * Check that leaf_frame_caller != ip so the same
			 * value is not added twice.
			 */
if (leaf_frame_caller && leaf_frame_caller != ip) {
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, leaf_frame_caller,
false, NULL, NULL, 0);
if (err)
return (err < 0) ? err : 0;
}
}
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip,
false, NULL, NULL, 0);
if (err)
return (err < 0) ? err : 0;
}
return 0;
}
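/*
 * When inline resolution is enabled, append a cursor entry for each inlined
 * function at @ip. Returns 0 when inline entries were appended, non-zero
 * otherwise.
 */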
static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
{
struct symbol *sym = ms->sym;
struct map *map = ms->map;
struct inline_node *inline_node;
struct inline_list *ilist;
struct dso *dso;
u64 addr;
int ret = 1;
struct map_symbol ilist_ms;
if (!symbol_conf.inline_name || !map || !sym)
return ret;
addr = map__dso_map_ip(map, ip);
addr = map__rip_2objdump(map, addr);
dso = map__dso(map);
inline_node = inlines__tree_find(&dso->inlined_nodes, addr);
if (!inline_node) {
inline_node = dso__parse_addr_inlines(dso, addr, sym);
if (!inline_node)
return ret;
inlines__tree_insert(&dso->inlined_nodes, inline_node);
}
ilist_ms = (struct map_symbol) {
.maps = maps__get(ms->maps),
.map = map__get(map),
};
list_for_each_entry(ilist, &inline_node->val, list) {
ilist_ms.sym = ilist->symbol;
ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
NULL, 0, 0, 0, ilist->srcline);
if (ret != 0)
return ret;
}
map__put(ilist_ms.map);
maps__put(ilist_ms.maps);
return ret;
}
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
struct callchain_cursor *cursor = arg;
const char *srcline = NULL;
u64 addr = entry->ip;
if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
return 0;
if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
return 0;
/*
* Convert entry->ip from a virtual address to an offset in
* its corresponding binary.
*/
if (entry->ms.map)
addr = map__dso_map_ip(entry->ms.map, entry->ip);
srcline = callchain_srcline(&entry->ms, addr);
return callchain_cursor_append(cursor, entry->ip, &entry->ms,
false, NULL, 0, 0, 0, srcline);
}
static int thread__resolve_callchain_unwind(struct thread *thread,
struct callchain_cursor *cursor,
struct evsel *evsel,
struct perf_sample *sample,
int max_stack)
{
/* Can we do dwarf post unwind? */
if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
(evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
return 0;
/* Bail out if nothing was captured. */
if ((!sample->user_regs.regs) ||
(!sample->user_stack.size))
return 0;
return unwind__get_entries(unwind_entry, cursor,
thread, sample, max_stack, false);
}
int thread__resolve_callchain(struct thread *thread,
struct callchain_cursor *cursor,
struct evsel *evsel,
struct perf_sample *sample,
struct symbol **parent,
struct addr_location *root_al,
int max_stack)
{
int ret = 0;
if (cursor == NULL)
return -ENOMEM;
callchain_cursor_reset(cursor);
if (callchain_param.order == ORDER_CALLEE) {
ret = thread__resolve_callchain_sample(thread, cursor,
evsel, sample,
parent, root_al,
max_stack);
if (ret)
return ret;
ret = thread__resolve_callchain_unwind(thread, cursor,
evsel, sample,
max_stack);
} else {
ret = thread__resolve_callchain_unwind(thread, cursor,
evsel, sample,
max_stack);
if (ret)
return ret;
ret = thread__resolve_callchain_sample(thread, cursor,
evsel, sample,
parent, root_al,
max_stack);
}
return ret;
}
int machine__for_each_thread(struct machine *machine,
int (*fn)(struct thread *thread, void *p),
void *priv)
{
struct threads *threads;
struct rb_node *nd;
int rc = 0;
int i;
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
threads = &machine->threads[i];
for (nd = rb_first_cached(&threads->entries); nd;
nd = rb_next(nd)) {
struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
rc = fn(trb->thread, priv);
if (rc != 0)
return rc;
}
}
return rc;
}
int machines__for_each_thread(struct machines *machines,
int (*fn)(struct thread *thread, void *p),
void *priv)
{
struct rb_node *nd;
int rc = 0;
rc = machine__for_each_thread(&machines->host, fn, priv);
if (rc != 0)
return rc;
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
rc = machine__for_each_thread(machine, fn, priv);
if (rc != 0)
return rc;
}
return rc;
}
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
return -1;
return machine->current_tid[cpu];
}
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
pid_t tid)
{
struct thread *thread;
const pid_t init_val = -1;
if (cpu < 0)
return -EINVAL;
if (realloc_array_as_needed(machine->current_tid,
machine->current_tid_sz,
(unsigned int)cpu,
&init_val))
return -ENOMEM;
machine->current_tid[cpu] = tid;
thread = machine__findnew_thread(machine, pid, tid);
if (!thread)
return -ENOMEM;
thread__set_cpu(thread, cpu);
thread__put(thread);
return 0;
}
/*
* Compares the raw arch string. N.B. see instead perf_env__arch() or
* machine__normalized_is() if a normalized arch is needed.
*/
bool machine__is(struct machine *machine, const char *arch)
{
return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}
bool machine__normalized_is(struct machine *machine, const char *arch)
{
return machine && !strcmp(perf_env__arch(machine->env), arch);
}
int machine__nr_cpus_avail(struct machine *machine)
{
return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}
int machine__get_kernel_start(struct machine *machine)
{
struct map *map = machine__kernel_map(machine);
int err = 0;
/*
* The only addresses above 2^63 are kernel addresses of a 64-bit
* kernel. Note that addresses are unsigned so that on a 32-bit system
* all addresses including kernel addresses are less than 2^32. In
* that case (32-bit system), if the kernel mapping is unknown, all
* addresses will be assumed to be in user space - see
* machine__kernel_ip().
*/
machine->kernel_start = 1ULL << 63;
if (map) {
err = map__load(map);
/*
* On x86_64, PTI entry trampolines are less than the
* start of kernel text, but still above 2^63. So leave
* kernel_start = 1ULL << 63 for x86_64.
*/
if (!err && !machine__is(machine, "x86_64"))
machine->kernel_start = map__start(map);
}
return err;
}
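/*
 * For machines with a single address space, derive the cpumode of @addr from
 * the address itself instead of trusting the sample's cpumode.
 */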
u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
{
u8 addr_cpumode = cpumode;
bool kernel_ip;
if (!machine->single_address_space)
goto out;
kernel_ip = machine__kernel_ip(machine, addr);
switch (cpumode) {
case PERF_RECORD_MISC_KERNEL:
case PERF_RECORD_MISC_USER:
addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
PERF_RECORD_MISC_USER;
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
case PERF_RECORD_MISC_GUEST_USER:
addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
PERF_RECORD_MISC_GUEST_USER;
break;
default:
break;
}
out:
return addr_cpumode;
}
struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
{
return dsos__findnew_id(&machine->dsos, filename, id);
}
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
return machine__findnew_dso_id(machine, filename, NULL);
}
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
struct machine *machine = vmachine;
struct map *map;
struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
if (sym == NULL)
return NULL;
*modp = __map__is_kmodule(map) ? (char *)map__dso(map)->short_name : NULL;
*addrp = map__unmap_ip(map, sym->start);
return sym->name;
}
int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
{
struct dso *pos;
int err = 0;
list_for_each_entry(pos, &machine->dsos.head, node) {
if (fn(pos, machine, priv))
err = -1;
}
return err;
}
int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
{
struct maps *maps = machine__kernel_maps(machine);
struct map_rb_node *pos;
int err = 0;
maps__for_each_entry(maps, pos) {
err = fn(pos->map, priv);
if (err != 0) {
break;
}
}
return err;
}
bool machine__is_lock_function(struct machine *machine, u64 addr)
{
if (!machine->sched.text_start) {
struct map *kmap;
struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);
if (!sym) {
/* to avoid retry */
machine->sched.text_start = 1;
return false;
}
machine->sched.text_start = map__unmap_ip(kmap, sym->start);
/* should not fail from here */
sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
machine->sched.text_end = map__unmap_ip(kmap, sym->start);
sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
machine->lock.text_start = map__unmap_ip(kmap, sym->start);
sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
machine->lock.text_end = map__unmap_ip(kmap, sym->start);
}
/* failed to get kernel symbols */
if (machine->sched.text_start == 1)
return false;
/* mutex and rwsem functions are in sched text section */
if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
return true;
/* spinlock functions are in lock text section */
if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
return true;
return false;
}
| linux-master | tools/perf/util/machine.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <sched.h>
#include "util.h" // for sched_getcpu()
#include "../perf-sys.h"
#include "cloexec.h"
#include "event.h"
#include "asm/bug.h"
#include "debug.h"
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/string.h>
static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
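/*
* Check whether sys_perf_event_open() accepts PERF_FLAG_FD_CLOEXEC:
* returns 1 if supported, 0 if not, -1 if the probe itself failed.
*/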
static int perf_flag_probe(void)
{
/* use 'safest' configuration as used in evsel__fallback() */
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
.exclude_kernel = 1,
};
int fd;
int err;
int cpu;
pid_t pid = -1;
char sbuf[STRERR_BUFSIZE];
cpu = sched_getcpu();
if (cpu < 0)
cpu = 0;
/*
* Using -1 for the pid is a workaround to avoid gratuitous jump label
* changes.
*/
while (1) {
/* check cloexec flag */
fd = sys_perf_event_open(&attr, pid, cpu, -1,
PERF_FLAG_FD_CLOEXEC);
if (fd < 0 && pid == -1 && errno == EACCES) {
pid = 0;
continue;
}
break;
}
err = errno;
if (fd >= 0) {
close(fd);
return 1;
}
WARN_ONCE(err != EINVAL && err != EBUSY && err != EACCES,
"perf_event_open(..., PERF_FLAG_FD_CLOEXEC) failed with unexpected error %d (%s)\n",
err, str_error_r(err, sbuf, sizeof(sbuf)));
/* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
while (1) {
fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
if (fd < 0 && pid == -1 && errno == EACCES) {
pid = 0;
continue;
}
break;
}
err = errno;
if (fd >= 0)
close(fd);
if (WARN_ONCE(fd < 0 && err != EBUSY && err != EACCES,
"perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
err, str_error_r(err, sbuf, sizeof(sbuf))))
return -1;
return 0;
}
unsigned long perf_event_open_cloexec_flag(void)
{
static bool probed;
if (!probed) {
if (perf_flag_probe() <= 0)
flag = 0;
probed = true;
}
return flag;
}
| linux-master | tools/perf/util/cloexec.c |
#include "util/map_symbol.h"
#include "util/branch.h"
#include <linux/kernel.h>
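/* Return true if addr1 and addr2 fall into different size-aligned areas */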
static bool cross_area(u64 addr1, u64 addr2, int size)
{
u64 align1, align2;
align1 = addr1 & ~(size - 1);
align2 = addr2 & ~(size - 1);
return (align1 != align2) ? true : false;
}
#define AREA_4K 4096
#define AREA_2M (2 * 1024 * 1024)
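/*
* Account one branch record in the per-type statistics, including the
* direction of conditional branches and 4KB/2MB area-crossing counters.
*/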
void branch_type_count(struct branch_type_stat *st, struct branch_flags *flags,
u64 from, u64 to)
{
if (flags->type == PERF_BR_UNKNOWN || from == 0)
return;
if (flags->type == PERF_BR_EXTEND_ABI)
st->new_counts[flags->new_type]++;
else
st->counts[flags->type]++;
if (flags->type == PERF_BR_COND) {
if (to > from)
st->cond_fwd++;
else
st->cond_bwd++;
}
if (cross_area(from, to, AREA_2M))
st->cross_2m++;
else if (cross_area(from, to, AREA_4K))
st->cross_4k++;
}
const char *branch_new_type_name(int new_type)
{
const char *branch_new_names[PERF_BR_NEW_MAX] = {
"FAULT_ALGN",
"FAULT_DATA",
"FAULT_INST",
/*
* TODO: This switch should happen on 'session->header.env.arch'
* instead, because an arm64 platform perf recording could be
* opened for analysis on other platforms as well.
*/
#ifdef __aarch64__
"ARM64_FIQ",
"ARM64_DEBUG_HALT",
"ARM64_DEBUG_EXIT",
"ARM64_DEBUG_INST",
"ARM64_DEBUG_DATA"
#else
"ARCH_1",
"ARCH_2",
"ARCH_3",
"ARCH_4",
"ARCH_5"
#endif
};
if (new_type >= 0 && new_type < PERF_BR_NEW_MAX)
return branch_new_names[new_type];
return NULL;
}
const char *branch_type_name(int type)
{
const char *branch_names[PERF_BR_MAX] = {
"N/A",
"COND",
"UNCOND",
"IND",
"CALL",
"IND_CALL",
"RET",
"SYSCALL",
"SYSRET",
"COND_CALL",
"COND_RET",
"ERET",
"IRQ",
"SERROR",
"NO_TX",
"", // Needed for PERF_BR_EXTEND_ABI that ends up triggering some compiler warnings about NULL deref
};
if (type >= 0 && type < PERF_BR_MAX)
return branch_names[type];
return NULL;
}
const char *get_branch_type(struct branch_entry *e)
{
if (e->flags.type == PERF_BR_UNKNOWN)
return "";
if (e->flags.type == PERF_BR_EXTEND_ABI)
return branch_new_type_name(e->flags.new_type);
return branch_type_name(e->flags.type);
}
void branch_type_stat_display(FILE *fp, struct branch_type_stat *st)
{
u64 total = 0;
int i;
for (i = 0; i < PERF_BR_MAX; i++)
total += st->counts[i];
if (total == 0)
return;
fprintf(fp, "\n#");
fprintf(fp, "\n# Branch Statistics:");
fprintf(fp, "\n#");
if (st->cond_fwd > 0) {
fprintf(fp, "\n%8s: %5.1f%%",
"COND_FWD",
100.0 * (double)st->cond_fwd / (double)total);
}
if (st->cond_bwd > 0) {
fprintf(fp, "\n%8s: %5.1f%%",
"COND_BWD",
100.0 * (double)st->cond_bwd / (double)total);
}
if (st->cross_4k > 0) {
fprintf(fp, "\n%8s: %5.1f%%",
"CROSS_4K",
100.0 * (double)st->cross_4k / (double)total);
}
if (st->cross_2m > 0) {
fprintf(fp, "\n%8s: %5.1f%%",
"CROSS_2M",
100.0 * (double)st->cross_2m / (double)total);
}
for (i = 0; i < PERF_BR_MAX; i++) {
if (st->counts[i] > 0)
fprintf(fp, "\n%8s: %5.1f%%",
branch_type_name(i),
100.0 *
(double)st->counts[i] / (double)total);
}
for (i = 0; i < PERF_BR_NEW_MAX; i++) {
if (st->new_counts[i] > 0)
fprintf(fp, "\n%8s: %5.1f%%",
branch_new_type_name(i),
100.0 *
(double)st->new_counts[i] / (double)total);
}
}
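/* Append str to the buffer, preceded by " (" for the first entry and a space otherwise */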
static int count_str_scnprintf(int idx, const char *str, char *bf, int size)
{
return scnprintf(bf, size, "%s%s", (idx) ? " " : " (", str);
}
int branch_type_str(struct branch_type_stat *st, char *bf, int size)
{
int i, j = 0, printed = 0;
u64 total = 0;
for (i = 0; i < PERF_BR_MAX; i++)
total += st->counts[i];
for (i = 0; i < PERF_BR_NEW_MAX; i++)
total += st->new_counts[i];
if (total == 0)
return 0;
if (st->cond_fwd > 0)
printed += count_str_scnprintf(j++, "COND_FWD", bf + printed, size - printed);
if (st->cond_bwd > 0)
printed += count_str_scnprintf(j++, "COND_BWD", bf + printed, size - printed);
for (i = 0; i < PERF_BR_MAX; i++) {
if (i == PERF_BR_COND)
continue;
if (st->counts[i] > 0)
printed += count_str_scnprintf(j++, branch_type_name(i), bf + printed, size - printed);
}
for (i = 0; i < PERF_BR_NEW_MAX; i++) {
if (st->new_counts[i] > 0)
printed += count_str_scnprintf(j++, branch_new_type_name(i), bf + printed, size - printed);
}
if (st->cross_4k > 0)
printed += count_str_scnprintf(j++, "CROSS_4K", bf + printed, size - printed);
if (st->cross_2m > 0)
printed += count_str_scnprintf(j++, "CROSS_2M", bf + printed, size - printed);
return printed;
}
const char *branch_spec_desc(int spec)
{
const char *branch_spec_outcomes[PERF_BR_SPEC_MAX] = {
"N/A",
"SPEC_WRONG_PATH",
"NON_SPEC_CORRECT_PATH",
"SPEC_CORRECT_PATH",
};
if (spec >= 0 && spec < PERF_BR_SPEC_MAX)
return branch_spec_outcomes[spec];
return NULL;
}
| linux-master | tools/perf/util/branch.c |
// SPDX-License-Identifier: LGPL-2.1
// Copyright (C) 2018, 2019 Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
//
#ifndef HAVE_GET_CURRENT_DIR_NAME
#include "get_current_dir_name.h"
#include <limits.h>
#include <string.h>
#include <unistd.h>
/* Android's 'bionic' library, for one, doesn't have this */
char *get_current_dir_name(void)
{
char pwd[PATH_MAX];
return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd);
}
#endif // HAVE_GET_CURRENT_DIR_NAME
| linux-master | tools/perf/util/get_current_dir_name.c |
// SPDX-License-Identifier: GPL-2.0
#include "path.h"
#include "cache.h"
#include <linux/kernel.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <unistd.h>
static char *cleanup_path(char *path)
{
/* Clean it up */
if (!memcmp(path, "./", 2)) {
path += 2;
while (*path == '/')
path++;
}
return path;
}
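/* Format a path into path_buf, falling back to "/bad-path/" if it does not fit */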
char *mkpath(char *path_buf, size_t sz, const char *fmt, ...)
{
va_list args;
unsigned len;
va_start(args, fmt);
len = vsnprintf(path_buf, sz, fmt, args);
va_end(args);
if (len >= sz)
strncpy(path_buf, "/bad-path/", sz);
return cleanup_path(path_buf);
}
int path__join(char *bf, size_t size, const char *path1, const char *path2)
{
return scnprintf(bf, size, "%s%s%s", path1, path1[0] ? "/" : "", path2);
}
int path__join3(char *bf, size_t size, const char *path1, const char *path2, const char *path3)
{
return scnprintf(bf, size, "%s%s%s%s%s", path1, path1[0] ? "/" : "",
path2, path2[0] ? "/" : "", path3);
}
bool is_regular_file(const char *file)
{
struct stat st;
if (stat(file, &st))
return false;
return S_ISREG(st.st_mode);
}
/* Helper function for filesystems that return a dent->d_type DT_UNKNOWN */
bool is_directory(const char *base_path, const struct dirent *dent)
{
char path[PATH_MAX];
struct stat st;
snprintf(path, sizeof(path), "%s/%s", base_path, dent->d_name);
if (stat(path, &st))
return false;
return S_ISDIR(st.st_mode);
}
bool is_executable_file(const char *base_path, const struct dirent *dent)
{
char path[PATH_MAX];
struct stat st;
snprintf(path, sizeof(path), "%s/%s", base_path, dent->d_name);
if (stat(path, &st))
return false;
return !S_ISDIR(st.st_mode) && (st.st_mode & S_IXUSR);
}
| linux-master | tools/perf/util/path.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* probe-finder.c : C expression to kprobe event converter
*
* Written by Masami Hiramatsu <[email protected]>
*/
#include <inttypes.h>
#include <sys/utsname.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <dwarf-regs.h>
#include <linux/bitops.h>
#include <linux/zalloc.h>
#include "event.h"
#include "dso.h"
#include "debug.h"
#include "intlist.h"
#include "strbuf.h"
#include "strlist.h"
#include "symbol.h"
#include "probe-finder.h"
#include "probe-file.h"
#include "string2.h"
#ifdef HAVE_DEBUGINFOD_SUPPORT
#include <elfutils/debuginfod.h>
#endif
/* Kprobe tracer basic type is up to u64 */
#define MAX_BASIC_TYPE_BITS 64
/* Dwarf FL wrappers */
static char *debuginfo_path; /* Currently dummy */
static const Dwfl_Callbacks offline_callbacks = {
.find_debuginfo = dwfl_standard_find_debuginfo,
.debuginfo_path = &debuginfo_path,
.section_address = dwfl_offline_section_address,
/* We use this table for core files too. */
.find_elf = dwfl_build_id_find_elf,
};
/* Get a Dwarf from offline image */
static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
const char *path)
{
GElf_Addr dummy;
int fd;
fd = open(path, O_RDONLY);
if (fd < 0)
return fd;
dbg->dwfl = dwfl_begin(&offline_callbacks);
if (!dbg->dwfl)
goto error;
dwfl_report_begin(dbg->dwfl);
dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd);
if (!dbg->mod)
goto error;
dbg->dbg = dwfl_module_getdwarf(dbg->mod, &dbg->bias);
if (!dbg->dbg)
goto error;
dwfl_module_build_id(dbg->mod, &dbg->build_id, &dummy);
dwfl_report_end(dbg->dwfl, NULL, NULL);
return 0;
error:
if (dbg->dwfl)
dwfl_end(dbg->dwfl);
else
close(fd);
memset(dbg, 0, sizeof(*dbg));
return -ENOENT;
}
static struct debuginfo *__debuginfo__new(const char *path)
{
struct debuginfo *dbg = zalloc(sizeof(*dbg));
if (!dbg)
return NULL;
if (debuginfo__init_offline_dwarf(dbg, path) < 0)
zfree(&dbg);
if (dbg)
pr_debug("Open Debuginfo file: %s\n", path);
return dbg;
}
enum dso_binary_type distro_dwarf_types[] = {
DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
struct debuginfo *debuginfo__new(const char *path)
{
enum dso_binary_type *type;
char buf[PATH_MAX], nil = '\0';
struct dso *dso;
struct debuginfo *dinfo = NULL;
struct build_id bid;
/* Try to open distro debuginfo files */
dso = dso__new(path);
if (!dso)
goto out;
/* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */
if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0)
dso__set_build_id(dso, &bid);
for (type = distro_dwarf_types;
!dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND;
type++) {
if (dso__read_binary_type_filename(dso, *type, &nil,
buf, PATH_MAX) < 0)
continue;
dinfo = __debuginfo__new(buf);
}
dso__put(dso);
out:
/* if failed to open all distro debuginfo, open given binary */
return dinfo ? : __debuginfo__new(path);
}
void debuginfo__delete(struct debuginfo *dbg)
{
if (dbg) {
if (dbg->dwfl)
dwfl_end(dbg->dwfl);
free(dbg);
}
}
/*
* Probe finder related functions
*/
static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs)
{
struct probe_trace_arg_ref *ref;
ref = zalloc(sizeof(struct probe_trace_arg_ref));
if (ref != NULL)
ref->offset = offs;
return ref;
}
/*
* Convert a location into trace_arg.
* If tvar == NULL, this just checks variable can be converted.
* If fentry == true and vr_die is a parameter, do heuristic search
* for the location fuzzed by function entry mcount.
*/
static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
Dwarf_Op *fb_ops, Dwarf_Die *sp_die,
unsigned int machine,
struct probe_trace_arg *tvar)
{
Dwarf_Attribute attr;
Dwarf_Addr tmp = 0;
Dwarf_Op *op;
size_t nops;
unsigned int regn;
Dwarf_Word offs = 0;
bool ref = false;
const char *regs;
int ret, ret2 = 0;
if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL)
goto static_var;
/* Constant value */
if (dwarf_attr(vr_die, DW_AT_const_value, &attr) &&
immediate_value_is_supported()) {
Dwarf_Sword snum;
if (!tvar)
return 0;
dwarf_formsdata(&attr, &snum);
ret = asprintf(&tvar->value, "\\%ld", (long)snum);
return ret < 0 ? -ENOMEM : 0;
}
/* TODO: handle more than 1 exprs */
if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
return -EINVAL; /* Broken DIE ? */
if (dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0) {
ret = dwarf_entrypc(sp_die, &tmp);
if (ret)
return -ENOENT;
if (probe_conf.show_location_range &&
(dwarf_tag(vr_die) == DW_TAG_variable)) {
ret2 = -ERANGE;
} else if (addr != tmp ||
dwarf_tag(vr_die) != DW_TAG_formal_parameter) {
return -ENOENT;
}
ret = dwarf_highpc(sp_die, &tmp);
if (ret)
return -ENOENT;
/*
* This is fuzzed by fentry mcount. We try to find the
* parameter location at the earliest address.
*/
for (addr += 1; addr <= tmp; addr++) {
if (dwarf_getlocation_addr(&attr, addr, &op,
&nops, 1) > 0)
goto found;
}
return -ENOENT;
}
found:
if (nops == 0)
/* TODO: Support const_value */
return -ENOENT;
if (op->atom == DW_OP_addr) {
static_var:
if (!tvar)
return ret2;
/* Static variables on memory (not stack), make @varname */
ret = strlen(dwarf_diename(vr_die));
tvar->value = zalloc(ret + 2);
if (tvar->value == NULL)
return -ENOMEM;
snprintf(tvar->value, ret + 2, "@%s", dwarf_diename(vr_die));
tvar->ref = alloc_trace_arg_ref((long)offs);
if (tvar->ref == NULL)
return -ENOMEM;
return ret2;
}
/* If this is based on frame buffer, set the offset */
if (op->atom == DW_OP_fbreg) {
if (fb_ops == NULL)
return -ENOTSUP;
ref = true;
offs = op->number;
op = &fb_ops[0];
}
if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) {
regn = op->atom - DW_OP_breg0;
offs += op->number;
ref = true;
} else if (op->atom >= DW_OP_reg0 && op->atom <= DW_OP_reg31) {
regn = op->atom - DW_OP_reg0;
} else if (op->atom == DW_OP_bregx) {
regn = op->number;
offs += op->number2;
ref = true;
} else if (op->atom == DW_OP_regx) {
regn = op->number;
} else {
pr_debug("DW_OP %x is not supported.\n", op->atom);
return -ENOTSUP;
}
if (!tvar)
return ret2;
regs = get_dwarf_regstr(regn, machine);
if (!regs) {
/* This should be a bug in DWARF or this tool */
pr_warning("Mapping for the register number %u "
"missing on this architecture.\n", regn);
return -ENOTSUP;
}
tvar->value = strdup(regs);
if (tvar->value == NULL)
return -ENOMEM;
if (ref) {
tvar->ref = alloc_trace_arg_ref((long)offs);
if (tvar->ref == NULL)
return -ENOMEM;
}
return ret2;
}
#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long))
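/*
* Convert the DWARF type of vr_die (or an explicit cast) into a fetch-arg
* type string for the trace event: string/ustring, a bitfield spec, or a
* signed/unsigned/hex integer of the appropriate bit width.
*/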
static int convert_variable_type(Dwarf_Die *vr_die,
struct probe_trace_arg *tvar,
const char *cast, bool user_access)
{
struct probe_trace_arg_ref **ref_ptr = &tvar->ref;
Dwarf_Die type;
char buf[16];
char sbuf[STRERR_BUFSIZE];
int bsize, boffs, total;
int ret;
char prefix;
/* TODO: check all types */
if (cast && strcmp(cast, "string") != 0 && strcmp(cast, "ustring") &&
strcmp(cast, "x") != 0 &&
strcmp(cast, "s") != 0 && strcmp(cast, "u") != 0) {
/* Non string type is OK */
/* and respect signedness/hexadecimal cast */
tvar->type = strdup(cast);
return (tvar->type == NULL) ? -ENOMEM : 0;
}
bsize = dwarf_bitsize(vr_die);
if (bsize > 0) {
/* This is a bitfield */
boffs = dwarf_bitoffset(vr_die);
total = dwarf_bytesize(vr_die);
if (boffs < 0 || total < 0)
return -ENOENT;
ret = snprintf(buf, 16, "b%d@%d/%zd", bsize, boffs,
BYTES_TO_BITS(total));
goto formatted;
}
if (die_get_real_type(vr_die, &type) == NULL) {
pr_warning("Failed to get a type information of %s.\n",
dwarf_diename(vr_die));
return -ENOENT;
}
pr_debug("%s type is %s.\n",
dwarf_diename(vr_die), dwarf_diename(&type));
if (cast && (!strcmp(cast, "string") || !strcmp(cast, "ustring"))) {
/* String type */
ret = dwarf_tag(&type);
if (ret != DW_TAG_pointer_type &&
ret != DW_TAG_array_type) {
pr_warning("Failed to cast into string: "
"%s(%s) is not a pointer nor array.\n",
dwarf_diename(vr_die), dwarf_diename(&type));
return -EINVAL;
}
if (die_get_real_type(&type, &type) == NULL) {
pr_warning("Failed to get a type"
" information.\n");
return -ENOENT;
}
if (ret == DW_TAG_pointer_type) {
while (*ref_ptr)
ref_ptr = &(*ref_ptr)->next;
/* Add new reference with offset +0 */
*ref_ptr = zalloc(sizeof(struct probe_trace_arg_ref));
if (*ref_ptr == NULL) {
pr_warning("Out of memory error\n");
return -ENOMEM;
}
(*ref_ptr)->user_access = user_access;
}
if (!die_compare_name(&type, "char") &&
!die_compare_name(&type, "unsigned char")) {
pr_warning("Failed to cast into string: "
"%s is not (unsigned) char *.\n",
dwarf_diename(vr_die));
return -EINVAL;
}
tvar->type = strdup(cast);
return (tvar->type == NULL) ? -ENOMEM : 0;
}
if (cast && (strcmp(cast, "u") == 0))
prefix = 'u';
else if (cast && (strcmp(cast, "s") == 0))
prefix = 's';
else if (cast && (strcmp(cast, "x") == 0) &&
probe_type_is_available(PROBE_TYPE_X))
prefix = 'x';
else
prefix = die_is_signed_type(&type) ? 's' :
probe_type_is_available(PROBE_TYPE_X) ? 'x' : 'u';
ret = dwarf_bytesize(&type);
if (ret <= 0)
/* No size ... try to use default type */
return 0;
ret = BYTES_TO_BITS(ret);
/* Check the bitwidth */
if (ret > MAX_BASIC_TYPE_BITS) {
pr_info("%s exceeds max-bitwidth. Cut down to %d bits.\n",
dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
ret = MAX_BASIC_TYPE_BITS;
}
ret = snprintf(buf, 16, "%c%d", prefix, ret);
formatted:
if (ret < 0 || ret >= 16) {
if (ret >= 16)
ret = -E2BIG;
pr_warning("Failed to convert variable type: %s\n",
str_error_r(-ret, sbuf, sizeof(sbuf)));
return ret;
}
tvar->type = strdup(buf);
if (tvar->type == NULL)
return -ENOMEM;
return 0;
}
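/*
* Walk a chain of member/array accesses (var.field, var->field, var[N]) and
* accumulate the resulting dereference offsets into the trace-arg references.
*/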
static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
struct perf_probe_arg_field *field,
struct probe_trace_arg_ref **ref_ptr,
Dwarf_Die *die_mem, bool user_access)
{
struct probe_trace_arg_ref *ref = *ref_ptr;
Dwarf_Die type;
Dwarf_Word offs;
int ret, tag;
pr_debug("converting %s in %s\n", field->name, varname);
if (die_get_real_type(vr_die, &type) == NULL) {
pr_warning("Failed to get the type of %s.\n", varname);
return -ENOENT;
}
pr_debug2("Var real type: %s (%x)\n", dwarf_diename(&type),
(unsigned)dwarf_dieoffset(&type));
tag = dwarf_tag(&type);
if (field->name[0] == '[' &&
(tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)) {
/* Save original type for next field or type */
memcpy(die_mem, &type, sizeof(*die_mem));
/* Get the type of this array */
if (die_get_real_type(&type, &type) == NULL) {
pr_warning("Failed to get the type of %s.\n", varname);
return -ENOENT;
}
pr_debug2("Array real type: %s (%x)\n", dwarf_diename(&type),
(unsigned)dwarf_dieoffset(&type));
if (tag == DW_TAG_pointer_type) {
ref = zalloc(sizeof(struct probe_trace_arg_ref));
if (ref == NULL)
return -ENOMEM;
if (*ref_ptr)
(*ref_ptr)->next = ref;
else
*ref_ptr = ref;
}
ref->offset += dwarf_bytesize(&type) * field->index;
ref->user_access = user_access;
goto next;
} else if (tag == DW_TAG_pointer_type) {
/* Check the pointer and dereference */
if (!field->ref) {
pr_err("Semantic error: %s must be referred by '->'\n",
field->name);
return -EINVAL;
}
/* Get the type pointed by this pointer */
if (die_get_real_type(&type, &type) == NULL) {
pr_warning("Failed to get the type of %s.\n", varname);
return -ENOENT;
}
/* Verify it is a data structure */
tag = dwarf_tag(&type);
if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) {
pr_warning("%s is not a data structure nor a union.\n",
varname);
return -EINVAL;
}
ref = zalloc(sizeof(struct probe_trace_arg_ref));
if (ref == NULL)
return -ENOMEM;
if (*ref_ptr)
(*ref_ptr)->next = ref;
else
*ref_ptr = ref;
} else {
/* Verify it is a data structure */
if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) {
pr_warning("%s is not a data structure nor a union.\n",
varname);
return -EINVAL;
}
if (field->name[0] == '[') {
pr_err("Semantic error: %s is not a pointer"
" nor array.\n", varname);
return -EINVAL;
}
/* While processing unnamed field, we don't care about this */
if (field->ref && dwarf_diename(vr_die)) {
pr_err("Semantic error: %s must be referred by '.'\n",
field->name);
return -EINVAL;
}
if (!ref) {
pr_warning("Structure on a register is not "
"supported yet.\n");
return -ENOTSUP;
}
}
if (die_find_member(&type, field->name, die_mem) == NULL) {
pr_warning("%s(type:%s) has no member %s.\n", varname,
dwarf_diename(&type), field->name);
return -EINVAL;
}
/* Get the offset of the field */
if (tag == DW_TAG_union_type) {
offs = 0;
} else {
ret = die_get_data_member_location(die_mem, &offs);
if (ret < 0) {
pr_warning("Failed to get the offset of %s.\n",
field->name);
return ret;
}
}
ref->offset += (long)offs;
ref->user_access = user_access;
/* If this member is unnamed, we need to reuse this field */
if (!dwarf_diename(die_mem))
return convert_variable_fields(die_mem, varname, field,
&ref, die_mem, user_access);
next:
/* Converting next field */
if (field->next)
return convert_variable_fields(die_mem, field->name,
field->next, &ref, die_mem, user_access);
else
return 0;
}
static void print_var_not_found(const char *varname)
{
pr_err("Failed to find the location of the '%s' variable at this address.\n"
" Perhaps it has been optimized out.\n"
" Use -V with the --range option to show '%s' location range.\n",
varname, varname);
}
/* Show a variable in kprobe event format */
static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
{
Dwarf_Die die_mem;
int ret;
pr_debug("Converting variable %s into trace event.\n",
dwarf_diename(vr_die));
ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops,
&pf->sp_die, pf->machine, pf->tvar);
if (ret == -ENOENT && pf->skip_empty_arg)
/* This can be found in another place. Skip it */
return 0;
if (ret == -ENOENT || ret == -EINVAL) {
print_var_not_found(pf->pvar->var);
} else if (ret == -ENOTSUP)
pr_err("Sorry, we don't support this variable location yet.\n");
else if (ret == 0 && pf->pvar->field) {
ret = convert_variable_fields(vr_die, pf->pvar->var,
pf->pvar->field, &pf->tvar->ref,
&die_mem, pf->pvar->user_access);
vr_die = &die_mem;
}
if (ret == 0)
ret = convert_variable_type(vr_die, pf->tvar, pf->pvar->type,
pf->pvar->user_access);
/* *expr will be cached in libdw. Don't free it. */
return ret;
}
/* Find a variable in a scope DIE */
static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
{
Dwarf_Die vr_die;
char *buf, *ptr;
int ret = 0;
/* Copy raw parameters */
if (!is_c_varname(pf->pvar->var))
return copy_to_probe_trace_arg(pf->tvar, pf->pvar);
if (pf->pvar->name)
pf->tvar->name = strdup(pf->pvar->name);
else {
buf = synthesize_perf_probe_arg(pf->pvar);
if (!buf)
return -ENOMEM;
ptr = strchr(buf, ':'); /* Change type separator to _ */
if (ptr)
*ptr = '_';
pf->tvar->name = buf;
}
if (pf->tvar->name == NULL)
return -ENOMEM;
pr_debug("Searching '%s' variable in context.\n", pf->pvar->var);
/* Search child die for local variables and parameters. */
if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) {
/* Search again in global variables */
if (!die_find_variable_at(&pf->cu_die, pf->pvar->var,
0, &vr_die)) {
if (pf->skip_empty_arg)
return 0;
pr_warning("Failed to find '%s' in this function.\n",
pf->pvar->var);
ret = -ENOENT;
}
}
if (ret >= 0)
ret = convert_variable(&vr_die, pf);
return ret;
}
/* Convert subprogram DIE to trace point */
static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
Dwarf_Addr paddr, bool retprobe,
const char *function,
struct probe_trace_point *tp)
{
Dwarf_Addr eaddr;
GElf_Sym sym;
const char *symbol;
/* Verify the address is correct */
if (!dwarf_haspc(sp_die, paddr)) {
pr_warning("Specified offset is out of %s\n",
dwarf_diename(sp_die));
return -EINVAL;
}
if (dwarf_entrypc(sp_die, &eaddr) == 0) {
/* If the DIE has entrypc, use it. */
symbol = dwarf_diename(sp_die);
} else {
/* Try to get actual symbol name and address from symtab */
symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
eaddr = sym.st_value;
}
if (!symbol) {
pr_warning("Failed to find symbol at 0x%lx\n",
(unsigned long)paddr);
return -ENOENT;
}
tp->offset = (unsigned long)(paddr - eaddr);
tp->address = paddr;
tp->symbol = strdup(symbol);
if (!tp->symbol)
return -ENOMEM;
/* Return probe must be on the head of a subprogram */
if (retprobe) {
if (eaddr != paddr) {
pr_warning("Failed to find \"%s%%return\",\n"
" because %s is an inlined function and"
" has no return point.\n", function,
function);
return -EINVAL;
}
tp->retprobe = true;
}
return 0;
}
/* Call probe_finder callback with scope DIE */
static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
{
Dwarf_Attribute fb_attr;
Dwarf_Frame *frame = NULL;
size_t nops;
int ret;
if (!sc_die) {
pr_err("Caller must pass a scope DIE. Program error.\n");
return -EINVAL;
}
/* If not a real subprogram, find a real one */
if (!die_is_func_def(sc_die)) {
if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
if (die_find_tailfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
pr_warning("Ignoring tail call from %s\n",
dwarf_diename(&pf->sp_die));
return 0;
} else {
pr_warning("Failed to find probe point in any "
"functions.\n");
return -ENOENT;
}
}
} else
memcpy(&pf->sp_die, sc_die, sizeof(Dwarf_Die));
/* Get the frame base attribute/ops from subprogram */
dwarf_attr(&pf->sp_die, DW_AT_frame_base, &fb_attr);
ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
if (ret <= 0 || nops == 0) {
pf->fb_ops = NULL;
#if _ELFUTILS_PREREQ(0, 142)
} else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
(pf->cfi_eh != NULL || pf->cfi_dbg != NULL)) {
if ((dwarf_cfi_addrframe(pf->cfi_eh, pf->addr, &frame) != 0 &&
(dwarf_cfi_addrframe(pf->cfi_dbg, pf->addr, &frame) != 0)) ||
dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
pr_warning("Failed to get call frame on 0x%jx\n",
(uintmax_t)pf->addr);
free(frame);
return -ENOENT;
}
#endif
}
/* Call finder's callback handler */
ret = pf->callback(sc_die, pf);
/* Since *pf->fb_ops can be a part of frame, we should free it here. */
free(frame);
pf->fb_ops = NULL;
return ret;
}
struct find_scope_param {
const char *function;
const char *file;
int line;
int diff;
Dwarf_Die *die_mem;
bool found;
};
static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
{
struct find_scope_param *fsp = data;
const char *file;
int lno;
/* Skip if declared file name does not match */
if (fsp->file) {
file = die_get_decl_file(fn_die);
if (!file || strcmp(fsp->file, file) != 0)
return 0;
}
/* If the function name is given, that's what user expects */
if (fsp->function) {
if (die_match_name(fn_die, fsp->function)) {
memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
fsp->found = true;
return 1;
}
} else {
/* With the line number, find the nearest declared DIE */
dwarf_decl_line(fn_die, &lno);
if (lno < fsp->line && fsp->diff > fsp->line - lno) {
/* Keep a candidate and continue */
fsp->diff = fsp->line - lno;
memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
fsp->found = true;
}
}
return 0;
}
/* Return innermost DIE */
static int find_inner_scope_cb(Dwarf_Die *fn_die, void *data)
{
struct find_scope_param *fsp = data;
memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
fsp->found = true;
return 1;
}
/* Find an appropriate scope fits to given conditions */
static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
{
struct find_scope_param fsp = {
.function = pf->pev->point.function,
.file = pf->fname,
.line = pf->lno,
.diff = INT_MAX,
.die_mem = die_mem,
.found = false,
};
int ret;
ret = cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb,
&fsp);
if (!ret && !fsp.found)
cu_walk_functions_at(&pf->cu_die, pf->addr,
find_inner_scope_cb, &fsp);
return fsp.found ? die_mem : NULL;
}
static int verify_representive_line(struct probe_finder *pf, const char *fname,
int lineno, Dwarf_Addr addr)
{
const char *__fname, *__func = NULL;
Dwarf_Die die_mem;
int __lineno;
/* Verify line number and address by reverse search */
if (cu_find_lineinfo(&pf->cu_die, addr, &__fname, &__lineno) < 0)
return 0;
pr_debug2("Reversed line: %s:%d\n", __fname, __lineno);
if (strcmp(fname, __fname) || lineno == __lineno)
return 0;
pr_warning("This line is sharing the address with other lines.\n");
if (pf->pev->point.function) {
/* Find best match function name and lines */
pf->addr = addr;
if (find_best_scope(pf, &die_mem)
&& die_match_name(&die_mem, pf->pev->point.function)
&& dwarf_decl_line(&die_mem, &lineno) == 0) {
__func = dwarf_diename(&die_mem);
__lineno -= lineno;
}
}
pr_warning("Please try to probe at %s:%d instead.\n",
__func ? : __fname, __lineno);
return -ENOENT;
}
static int probe_point_line_walker(const char *fname, int lineno,
Dwarf_Addr addr, void *data)
{
struct probe_finder *pf = data;
Dwarf_Die *sc_die, die_mem;
int ret;
if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0)
return 0;
if (verify_representive_line(pf, fname, lineno, addr))
return -ENOENT;
pf->addr = addr;
sc_die = find_best_scope(pf, &die_mem);
if (!sc_die) {
pr_warning("Failed to find scope of probe point.\n");
return -ENOENT;
}
ret = call_probe_finder(sc_die, pf);
/* Continue if no error, because the line will be in an inline function */
return ret < 0 ? ret : 0;
}
/* Find probe point from its line number */
static int find_probe_point_by_line(struct probe_finder *pf)
{
return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf);
}
/* Find lines which match lazy pattern */
static int find_lazy_match_lines(struct intlist *list,
const char *fname, const char *pat)
{
FILE *fp;
char *line = NULL;
size_t line_len;
ssize_t len;
int count = 0, linenum = 1;
char sbuf[STRERR_BUFSIZE];
fp = fopen(fname, "r");
if (!fp) {
pr_warning("Failed to open %s: %s\n", fname,
str_error_r(errno, sbuf, sizeof(sbuf)));
return -errno;
}
while ((len = getline(&line, &line_len, fp)) > 0) {
if (line[len - 1] == '\n')
line[len - 1] = '\0';
if (strlazymatch(line, pat)) {
intlist__add(list, linenum);
count++;
}
linenum++;
}
if (ferror(fp))
count = -errno;
free(line);
fclose(fp);
if (count == 0)
pr_debug("No matched lines found in %s.\n", fname);
return count;
}
static int probe_point_lazy_walker(const char *fname, int lineno,
Dwarf_Addr addr, void *data)
{
struct probe_finder *pf = data;
Dwarf_Die *sc_die, die_mem;
int ret;
if (!intlist__has_entry(pf->lcache, lineno) ||
strtailcmp(fname, pf->fname) != 0)
return 0;
pr_debug("Probe line found: line:%d addr:0x%llx\n",
lineno, (unsigned long long)addr);
pf->addr = addr;
pf->lno = lineno;
sc_die = find_best_scope(pf, &die_mem);
if (!sc_die) {
pr_warning("Failed to find scope of probe point.\n");
return -ENOENT;
}
ret = call_probe_finder(sc_die, pf);
/*
* Continue if no error, because the lazy pattern will match
* to other lines
*/
return ret < 0 ? ret : 0;
}
/* Find probe points from lazy pattern */
static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
{
struct build_id bid;
char sbuild_id[SBUILD_ID_SIZE] = "";
int ret = 0;
char *fpath;
if (intlist__empty(pf->lcache)) {
const char *comp_dir;
comp_dir = cu_get_comp_dir(&pf->cu_die);
if (pf->dbg->build_id) {
build_id__init(&bid, pf->dbg->build_id, BUILD_ID_SIZE);
build_id__sprintf(&bid, sbuild_id);
}
ret = find_source_path(pf->fname, sbuild_id, comp_dir, &fpath);
if (ret < 0) {
pr_warning("Failed to find source file path.\n");
return ret;
}
/* Matching lazy line pattern */
ret = find_lazy_match_lines(pf->lcache, fpath,
pf->pev->point.lazy_line);
free(fpath);
if (ret <= 0)
return ret;
}
return die_walk_lines(sp_die, probe_point_lazy_walker, pf);
}
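/*
* For an unoptimized uprobe target probed at a plain function entry with
* variable arguments, advance the probe address past the compiler prologue
* so that the arguments are accessible.
*/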
static void skip_prologue(Dwarf_Die *sp_die, struct probe_finder *pf)
{
struct perf_probe_point *pp = &pf->pev->point;
/* Not uprobe? */
if (!pf->pev->uprobes)
return;
/* Compiled with optimization? */
if (die_is_optimized_target(&pf->cu_die))
return;
/* Don't know entrypc? */
if (!pf->addr)
return;
/* Only FUNC and FUNC@SRC are eligible. */
if (!pp->function || pp->line || pp->retprobe || pp->lazy_line ||
pp->offset || pp->abs_address)
return;
/* Not interested in func parameter? */
if (!perf_probe_with_var(pf->pev))
return;
pr_info("Target program is compiled without optimization. Skipping prologue.\n"
"Probe on address 0x%" PRIx64 " to force probing at the function entry.\n\n",
pf->addr);
die_skip_prologue(sp_die, &pf->cu_die, &pf->addr);
}
static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
{
struct probe_finder *pf = data;
struct perf_probe_point *pp = &pf->pev->point;
Dwarf_Addr addr;
int ret;
if (pp->lazy_line)
ret = find_probe_point_lazy(in_die, pf);
else {
/* Get probe address */
if (die_entrypc(in_die, &addr) != 0) {
pr_warning("Failed to get entry address of %s.\n",
dwarf_diename(in_die));
return -ENOENT;
}
if (addr == 0) {
pr_debug("%s has no valid entry address. skipped.\n",
dwarf_diename(in_die));
return -ENOENT;
}
pf->addr = addr;
pf->addr += pp->offset;
pr_debug("found inline addr: 0x%jx\n",
(uintmax_t)pf->addr);
ret = call_probe_finder(in_die, pf);
}
return ret;
}
/* Callback parameter with return value for libdw */
struct dwarf_callback_param {
void *data;
int retval;
};
/* Search function from function name */
static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
{
struct dwarf_callback_param *param = data;
struct probe_finder *pf = param->data;
struct perf_probe_point *pp = &pf->pev->point;
const char *fname;
/* Check tag and diename */
if (!die_is_func_def(sp_die) ||
!die_match_name(sp_die, pp->function))
return DWARF_CB_OK;
/* Check declared file */
fname = die_get_decl_file(sp_die);
if (!fname) {
pr_warning("A function DIE doesn't have decl_line. Maybe broken DWARF?\n");
return DWARF_CB_OK;
}
if (pp->file && fname && strtailcmp(pp->file, fname))
return DWARF_CB_OK;
pr_debug("Matched function: %s [%lx]\n", dwarf_diename(sp_die),
(unsigned long)dwarf_dieoffset(sp_die));
pf->fname = fname;
if (pp->line) { /* Function relative line */
dwarf_decl_line(sp_die, &pf->lno);
pf->lno += pp->line;
param->retval = find_probe_point_by_line(pf);
} else if (die_is_func_instance(sp_die)) {
/* Instances always have the entry address */
die_entrypc(sp_die, &pf->addr);
/* But in some case the entry address is 0 */
if (pf->addr == 0) {
pr_debug("%s has no entry PC. Skipped\n",
dwarf_diename(sp_die));
param->retval = 0;
/* Real function */
} else if (pp->lazy_line)
param->retval = find_probe_point_lazy(sp_die, pf);
else {
skip_prologue(sp_die, pf);
pf->addr += pp->offset;
/* TODO: Check the address in this function */
param->retval = call_probe_finder(sp_die, pf);
}
} else if (!probe_conf.no_inlines) {
/* Inlined function: search instances */
param->retval = die_walk_instances(sp_die,
probe_point_inline_cb, (void *)pf);
/* This could be a non-existent inline definition */
if (param->retval == -ENOENT)
param->retval = 0;
}
/* We need to find other candidates */
if (strisglob(pp->function) && param->retval >= 0) {
param->retval = 0; /* We have to clear the result */
return DWARF_CB_OK;
}
return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */
}
static int find_probe_point_by_func(struct probe_finder *pf)
{
struct dwarf_callback_param _param = {.data = (void *)pf,
.retval = 0};
dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, &_param, 0);
return _param.retval;
}
struct pubname_callback_param {
char *function;
char *file;
Dwarf_Die *cu_die;
Dwarf_Die *sp_die;
int found;
};
static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
{
struct pubname_callback_param *param = data;
const char *fname;
if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) {
if (dwarf_tag(param->sp_die) != DW_TAG_subprogram)
return DWARF_CB_OK;
if (die_match_name(param->sp_die, param->function)) {
if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die))
return DWARF_CB_OK;
if (param->file) {
fname = die_get_decl_file(param->sp_die);
if (!fname || strtailcmp(param->file, fname))
return DWARF_CB_OK;
}
param->found = 1;
return DWARF_CB_ABORT;
}
}
return DWARF_CB_OK;
}
static int debuginfo__find_probe_location(struct debuginfo *dbg,
struct probe_finder *pf)
{
struct perf_probe_point *pp = &pf->pev->point;
Dwarf_Off off, noff;
size_t cuhl;
Dwarf_Die *diep;
int ret = 0;
off = 0;
pf->lcache = intlist__new(NULL);
if (!pf->lcache)
return -ENOMEM;
/* Fastpath: lookup by function name from .debug_pubnames section */
if (pp->function && !strisglob(pp->function)) {
struct pubname_callback_param pubname_param = {
.function = pp->function,
.file = pp->file,
.cu_die = &pf->cu_die,
.sp_die = &pf->sp_die,
.found = 0,
};
struct dwarf_callback_param probe_param = {
.data = pf,
};
dwarf_getpubnames(dbg->dbg, pubname_search_cb,
&pubname_param, 0);
if (pubname_param.found) {
ret = probe_point_search_cb(&pf->sp_die, &probe_param);
if (ret)
goto found;
}
}
/* Loop on CUs (Compilation Unit) */
while (!dwarf_nextcu(dbg->dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
/* Get the DIE(Debugging Information Entry) of this CU */
diep = dwarf_offdie(dbg->dbg, off + cuhl, &pf->cu_die);
if (!diep) {
off = noff;
continue;
}
/* Check if target file is included. */
if (pp->file)
pf->fname = cu_find_realpath(&pf->cu_die, pp->file);
else
pf->fname = NULL;
if (!pp->file || pf->fname) {
if (pp->function)
ret = find_probe_point_by_func(pf);
else if (pp->lazy_line)
ret = find_probe_point_lazy(&pf->cu_die, pf);
else {
pf->lno = pp->line;
ret = find_probe_point_by_line(pf);
}
if (ret < 0)
break;
}
off = noff;
}
found:
intlist__delete(pf->lcache);
pf->lcache = NULL;
return ret;
}
/* Find probe points from debuginfo */
static int debuginfo__find_probes(struct debuginfo *dbg,
struct probe_finder *pf)
{
int ret = 0;
Elf *elf;
GElf_Ehdr ehdr;
if (pf->cfi_eh || pf->cfi_dbg)
return debuginfo__find_probe_location(dbg, pf);
/* Get the call frame information from this dwarf */
elf = dwarf_getelf(dbg->dbg);
if (elf == NULL)
return -EINVAL;
if (gelf_getehdr(elf, &ehdr) == NULL)
return -EINVAL;
pf->machine = ehdr.e_machine;
#if _ELFUTILS_PREREQ(0, 142)
do {
GElf_Shdr shdr;
if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
shdr.sh_type == SHT_PROGBITS)
pf->cfi_eh = dwarf_getcfi_elf(elf);
pf->cfi_dbg = dwarf_getcfi(dbg->dbg);
} while (0);
#endif
ret = debuginfo__find_probe_location(dbg, pf);
return ret;
}
struct local_vars_finder {
struct probe_finder *pf;
struct perf_probe_arg *args;
bool vars;
int max_args;
int nargs;
int ret;
};
/* Collect available variables in this scope */
static int copy_variables_cb(Dwarf_Die *die_mem, void *data)
{
struct local_vars_finder *vf = data;
struct probe_finder *pf = vf->pf;
int tag;
tag = dwarf_tag(die_mem);
if (tag == DW_TAG_formal_parameter ||
(tag == DW_TAG_variable && vf->vars)) {
if (convert_variable_location(die_mem, vf->pf->addr,
vf->pf->fb_ops, &pf->sp_die,
pf->machine, NULL) == 0) {
vf->args[vf->nargs].var = (char *)dwarf_diename(die_mem);
if (vf->args[vf->nargs].var == NULL) {
vf->ret = -ENOMEM;
return DIE_FIND_CB_END;
}
pr_debug(" %s", vf->args[vf->nargs].var);
vf->nargs++;
}
}
if (dwarf_haspc(die_mem, vf->pf->addr))
return DIE_FIND_CB_CONTINUE;
else
return DIE_FIND_CB_SIBLING;
}
static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf,
struct perf_probe_arg *args)
{
Dwarf_Die die_mem;
int i;
int n = 0;
struct local_vars_finder vf = {.pf = pf, .args = args, .vars = false,
.max_args = MAX_PROBE_ARGS, .ret = 0};
for (i = 0; i < pf->pev->nargs; i++) {
/* var is never NULL */
if (strcmp(pf->pev->args[i].var, PROBE_ARG_VARS) == 0)
vf.vars = true;
else if (strcmp(pf->pev->args[i].var, PROBE_ARG_PARAMS) != 0) {
/* Copy normal argument */
args[n] = pf->pev->args[i];
n++;
continue;
}
pr_debug("Expanding %s into:", pf->pev->args[i].var);
vf.nargs = n;
/* Special local variables */
die_find_child(sc_die, copy_variables_cb, (void *)&vf,
&die_mem);
pr_debug(" (%d)\n", vf.nargs - n);
if (vf.ret < 0)
return vf.ret;
n = vf.nargs;
}
return n;
}
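/* Check whether the current probe address was already recorded as a trace event */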
static bool trace_event_finder_overlap(struct trace_event_finder *tf)
{
int i;
for (i = 0; i < tf->ntevs; i++) {
if (tf->pf.addr == tf->tevs[i].point.address)
return true;
}
return false;
}
/* Add a found probe point into the trace event list */
static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
{
struct trace_event_finder *tf =
container_of(pf, struct trace_event_finder, pf);
struct perf_probe_point *pp = &pf->pev->point;
struct probe_trace_event *tev;
struct perf_probe_arg *args = NULL;
int ret, i;
/*
* For some reason (e.g. different column assigned to same address),
* this callback can be called with an address that has already been
* processed. Ignore it.
*/
if (trace_event_finder_overlap(tf))
return 0;
/* Check number of tevs */
if (tf->ntevs == tf->max_tevs) {
pr_warning("Too many( > %d) probe point found.\n",
tf->max_tevs);
return -ERANGE;
}
tev = &tf->tevs[tf->ntevs++];
/* Trace point should be converted from subprogram DIE */
ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
pp->retprobe, pp->function, &tev->point);
if (ret < 0)
goto end;
tev->point.realname = strdup(dwarf_diename(sc_die));
if (!tev->point.realname) {
ret = -ENOMEM;
goto end;
}
pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
tev->point.offset);
/* Expand special probe argument if exist */
args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS);
if (args == NULL) {
ret = -ENOMEM;
goto end;
}
ret = expand_probe_args(sc_die, pf, args);
if (ret < 0)
goto end;
tev->nargs = ret;
tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
if (tev->args == NULL) {
ret = -ENOMEM;
goto end;
}
/* Find each argument */
for (i = 0; i < tev->nargs; i++) {
pf->pvar = &args[i];
pf->tvar = &tev->args[i];
/* Variable should be found from scope DIE */
ret = find_variable(sc_die, pf);
if (ret != 0)
break;
}
end:
if (ret) {
clear_probe_trace_event(tev);
tf->ntevs--;
}
free(args);
return ret;
}
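/*
* For probe points where an argument could not be converted, fill in the
* configured magic number as the value, reusing the type found at another
* probe point.
*/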
static int fill_empty_trace_arg(struct perf_probe_event *pev,
struct probe_trace_event *tevs, int ntevs)
{
char **valp;
char *type;
int i, j, ret;
if (!ntevs)
return -ENOENT;
for (i = 0; i < pev->nargs; i++) {
type = NULL;
for (j = 0; j < ntevs; j++) {
if (tevs[j].args[i].value) {
type = tevs[j].args[i].type;
break;
}
}
if (j == ntevs) {
print_var_not_found(pev->args[i].var);
return -ENOENT;
}
for (j = 0; j < ntevs; j++) {
valp = &tevs[j].args[i].value;
if (*valp)
continue;
ret = asprintf(valp, "\\%lx", probe_conf.magic_num);
if (ret < 0)
return -ENOMEM;
/* Note that type can be NULL */
if (type) {
tevs[j].args[i].type = strdup(type);
if (!tevs[j].args[i].type)
return -ENOMEM;
}
}
}
return 0;
}
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
int debuginfo__find_trace_events(struct debuginfo *dbg,
struct perf_probe_event *pev,
struct probe_trace_event **tevs)
{
struct trace_event_finder tf = {
.pf = {.pev = pev, .dbg = dbg, .callback = add_probe_trace_event},
.max_tevs = probe_conf.max_probes, .mod = dbg->mod};
int ret, i;
/* Allocate result tevs array */
*tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs);
if (*tevs == NULL)
return -ENOMEM;
tf.tevs = *tevs;
tf.ntevs = 0;
if (pev->nargs != 0 && immediate_value_is_supported())
tf.pf.skip_empty_arg = true;
ret = debuginfo__find_probes(dbg, &tf.pf);
if (ret >= 0 && tf.pf.skip_empty_arg)
ret = fill_empty_trace_arg(pev, tf.tevs, tf.ntevs);
if (ret < 0 || tf.ntevs == 0) {
for (i = 0; i < tf.ntevs; i++)
clear_probe_trace_event(&tf.tevs[i]);
zfree(tevs);
return ret;
}
return (ret < 0) ? ret : tf.ntevs;
}
/* Collect available variables in this scope */
static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
{
struct available_var_finder *af = data;
struct variable_list *vl;
struct strbuf buf = STRBUF_INIT;
int tag, ret;
vl = &af->vls[af->nvls - 1];
tag = dwarf_tag(die_mem);
if (tag == DW_TAG_formal_parameter ||
tag == DW_TAG_variable) {
ret = convert_variable_location(die_mem, af->pf.addr,
af->pf.fb_ops, &af->pf.sp_die,
af->pf.machine, NULL);
if (ret == 0 || ret == -ERANGE) {
int ret2;
bool externs = !af->child;
if (strbuf_init(&buf, 64) < 0)
goto error;
if (probe_conf.show_location_range) {
if (!externs)
ret2 = strbuf_add(&buf,
ret ? "[INV]\t" : "[VAL]\t", 6);
else
ret2 = strbuf_add(&buf, "[EXT]\t", 6);
if (ret2)
goto error;
}
ret2 = die_get_varname(die_mem, &buf);
if (!ret2 && probe_conf.show_location_range &&
!externs) {
if (strbuf_addch(&buf, '\t') < 0)
goto error;
ret2 = die_get_var_range(&af->pf.sp_die,
die_mem, &buf);
}
pr_debug("Add new var: %s\n", buf.buf);
if (ret2 == 0) {
strlist__add(vl->vars,
strbuf_detach(&buf, NULL));
}
strbuf_release(&buf);
}
}
if (af->child && dwarf_haspc(die_mem, af->pf.addr))
return DIE_FIND_CB_CONTINUE;
else
return DIE_FIND_CB_SIBLING;
error:
strbuf_release(&buf);
pr_debug("Error in strbuf\n");
return DIE_FIND_CB_END;
}
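/* Check whether the current probe address already has a variable list entry */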
static bool available_var_finder_overlap(struct available_var_finder *af)
{
int i;
for (i = 0; i < af->nvls; i++) {
if (af->pf.addr == af->vls[i].point.address)
return true;
}
return false;
}
/* Add found variables into the available variables list */
static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
{
struct available_var_finder *af =
container_of(pf, struct available_var_finder, pf);
struct perf_probe_point *pp = &pf->pev->point;
struct variable_list *vl;
Dwarf_Die die_mem;
int ret;
/*
* For some reason (e.g. different column assigned to same address),
* this callback can be called with an address that has already been
* processed. Ignore it.
*/
if (available_var_finder_overlap(af))
return 0;
/* Check number of tevs */
if (af->nvls == af->max_vls) {
pr_warning("Too many( > %d) probe point found.\n", af->max_vls);
return -ERANGE;
}
vl = &af->vls[af->nvls++];
/* Trace point should be converted from subprogram DIE */
ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
pp->retprobe, pp->function, &vl->point);
if (ret < 0)
return ret;
pr_debug("Probe point found: %s+%lu\n", vl->point.symbol,
vl->point.offset);
/* Find local variables */
vl->vars = strlist__new(NULL, NULL);
if (vl->vars == NULL)
return -ENOMEM;
af->child = true;
die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem);
/* Find external variables */
if (!probe_conf.show_ext_vars)
goto out;
/* Don't need to search child DIE for external vars. */
af->child = false;
die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem);
out:
if (strlist__empty(vl->vars)) {
strlist__delete(vl->vars);
vl->vars = NULL;
}
return ret;
}
/*
* Find available variables at given probe point
* Return the number of found probe points. Return 0 if there is no
* matched probe point. Return <0 if an error occurs.
*/
int debuginfo__find_available_vars_at(struct debuginfo *dbg,
struct perf_probe_event *pev,
struct variable_list **vls)
{
struct available_var_finder af = {
.pf = {.pev = pev, .dbg = dbg, .callback = add_available_vars},
.mod = dbg->mod,
.max_vls = probe_conf.max_probes};
int ret;
/* Allocate result vls array */
*vls = zalloc(sizeof(struct variable_list) * af.max_vls);
if (*vls == NULL)
return -ENOMEM;
af.vls = *vls;
af.nvls = 0;
ret = debuginfo__find_probes(dbg, &af.pf);
if (ret < 0) {
/* Free vlist for error */
while (af.nvls--) {
zfree(&af.vls[af.nvls].point.symbol);
strlist__delete(af.vls[af.nvls].vars);
}
zfree(vls);
return ret;
}
return (ret < 0) ? ret : af.nvls;
}
/* For the kernel module, we need special code to get a DIE */
int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
bool adjust_offset)
{
int n, i;
Elf32_Word shndx;
Elf_Scn *scn;
Elf *elf;
GElf_Shdr mem, *shdr;
const char *p;
elf = dwfl_module_getelf(dbg->mod, &dbg->bias);
if (!elf)
return -EINVAL;
/* Get the number of relocations */
n = dwfl_module_relocations(dbg->mod);
if (n < 0)
return -ENOENT;
/* Search the relocation related .text section */
for (i = 0; i < n; i++) {
p = dwfl_module_relocation_info(dbg->mod, i, &shndx);
if (strcmp(p, ".text") == 0) {
/* OK, get the section header */
scn = elf_getscn(elf, shndx);
if (!scn)
return -ENOENT;
shdr = gelf_getshdr(scn, &mem);
if (!shdr)
return -ENOENT;
*offs = shdr->sh_addr;
if (adjust_offset)
*offs -= shdr->sh_offset;
}
}
return 0;
}
/* Reverse search */
int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
struct perf_probe_point *ppt)
{
Dwarf_Die cudie, spdie, indie;
Dwarf_Addr _addr = 0, baseaddr = 0;
const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
int baseline = 0, lineno = 0, ret = 0;
/* We always need to relocate the address for aranges */
if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
addr += baseaddr;
/* Find cu die */
if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
pr_warning("Failed to find debug information for address %#" PRIx64 "\n",
addr);
ret = -EINVAL;
goto end;
}
/* Find a corresponding line (filename and lineno) */
cu_find_lineinfo(&cudie, (Dwarf_Addr)addr, &fname, &lineno);
/* Don't care whether it failed or not */
/* Find a corresponding function (name, baseline and baseaddr) */
if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) {
/* Get function entry information */
func = basefunc = dwarf_diename(&spdie);
if (!func ||
die_entrypc(&spdie, &baseaddr) != 0 ||
dwarf_decl_line(&spdie, &baseline) != 0) {
lineno = 0;
goto post;
}
fname = die_get_decl_file(&spdie);
if (addr == baseaddr) {
/* Function entry - Relative line number is 0 */
lineno = baseline;
goto post;
}
/* Track down the inline functions step by step */
while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
&indie)) {
/* There is an inline function */
if (die_entrypc(&indie, &_addr) == 0 &&
_addr == addr) {
/*
* addr is at an inline function entry.
* In this case, lineno should be the call-site
* line number. (overwrite lineinfo)
*/
lineno = die_get_call_lineno(&indie);
fname = die_get_call_file(&indie);
break;
} else {
/*
* addr is in an inline function body.
* Since lineno points one of the lines
* of the inline function, baseline should
* be the entry line of the inline function.
*/
tmp = dwarf_diename(&indie);
if (!tmp ||
dwarf_decl_line(&indie, &baseline) != 0)
break;
func = tmp;
spdie = indie;
}
}
/* Verify the lineno and baseline are in a same file */
tmp = die_get_decl_file(&spdie);
if (!tmp || (fname && strcmp(tmp, fname) != 0))
lineno = 0;
}
post:
/* Make a relative line number or an offset */
if (lineno)
ppt->line = lineno - baseline;
else if (basefunc) {
ppt->offset = addr - baseaddr;
func = basefunc;
}
/* Duplicate strings */
if (func) {
ppt->function = strdup(func);
if (ppt->function == NULL) {
ret = -ENOMEM;
goto end;
}
}
if (fname) {
ppt->file = strdup(fname);
if (ppt->file == NULL) {
zfree(&ppt->function);
ret = -ENOMEM;
goto end;
}
}
end:
if (ret == 0 && (fname || func))
ret = 1; /* Found a point */
return ret;
}
/* Add a line and store the src path */
static int line_range_add_line(const char *src, unsigned int lineno,
struct line_range *lr)
{
/* Copy source path */
if (!lr->path) {
lr->path = strdup(src);
if (lr->path == NULL)
return -ENOMEM;
}
return intlist__add(lr->line_list, lineno);
}
static int line_range_walk_cb(const char *fname, int lineno,
Dwarf_Addr addr, void *data)
{
struct line_finder *lf = data;
const char *__fname;
int __lineno;
int err;
if ((strtailcmp(fname, lf->fname) != 0) ||
(lf->lno_s > lineno || lf->lno_e < lineno))
return 0;
/* Make sure this line can be reversible */
if (cu_find_lineinfo(&lf->cu_die, addr, &__fname, &__lineno) > 0
&& (lineno != __lineno || strcmp(fname, __fname)))
return 0;
err = line_range_add_line(fname, lineno, lf->lr);
if (err < 0 && err != -EEXIST)
return err;
return 0;
}
/* Find line range from its line number */
static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
{
int ret;
ret = die_walk_lines(sp_die ?: &lf->cu_die, line_range_walk_cb, lf);
/* Update status */
if (ret >= 0)
if (!intlist__empty(lf->lr->line_list))
ret = lf->found = 1;
else
ret = 0; /* Lines are not found */
else {
zfree(&lf->lr->path);
}
return ret;
}
static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
{
int ret = find_line_range_by_line(in_die, data);
/*
* We have to check all instances of an inlined function, because
* some execution paths can be optimized out depending on the
* function arguments of the instances. However, if an error occurs,
* it should be handled by the caller.
*/
return ret < 0 ? ret : 0;
}
/* Search function definition from function name */
static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
{
struct dwarf_callback_param *param = data;
struct line_finder *lf = param->data;
struct line_range *lr = lf->lr;
const char *fname;
/* Check declared file */
if (lr->file) {
fname = die_get_decl_file(sp_die);
if (!fname || strtailcmp(lr->file, fname))
return DWARF_CB_OK;
}
if (die_match_name(sp_die, lr->function) && die_is_func_def(sp_die)) {
lf->fname = die_get_decl_file(sp_die);
dwarf_decl_line(sp_die, &lr->offset);
pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);
lf->lno_s = lr->offset + lr->start;
if (lf->lno_s < 0) /* Overflow */
lf->lno_s = INT_MAX;
lf->lno_e = lr->offset + lr->end;
if (lf->lno_e < 0) /* Overflow */
lf->lno_e = INT_MAX;
pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e);
lr->start = lf->lno_s;
lr->end = lf->lno_e;
if (!die_is_func_instance(sp_die))
param->retval = die_walk_instances(sp_die,
line_range_inline_cb, lf);
else
param->retval = find_line_range_by_line(sp_die, lf);
return DWARF_CB_ABORT;
}
return DWARF_CB_OK;
}
static int find_line_range_by_func(struct line_finder *lf)
{
struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
dwarf_getfuncs(&lf->cu_die, line_range_search_cb, &param, 0);
return param.retval;
}
int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr)
{
struct line_finder lf = {.lr = lr, .found = 0};
int ret = 0;
Dwarf_Off off = 0, noff;
size_t cuhl;
Dwarf_Die *diep;
const char *comp_dir;
/* Fastpath: lookup by function name from .debug_pubnames section */
if (lr->function) {
struct pubname_callback_param pubname_param = {
.function = lr->function, .file = lr->file,
.cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0};
struct dwarf_callback_param line_range_param = {
.data = (void *)&lf, .retval = 0};
dwarf_getpubnames(dbg->dbg, pubname_search_cb,
&pubname_param, 0);
if (pubname_param.found) {
line_range_search_cb(&lf.sp_die, &line_range_param);
if (lf.found)
goto found;
}
}
/* Loop on CUs (Compilation Unit) */
while (!lf.found && ret >= 0) {
if (dwarf_nextcu(dbg->dbg, off, &noff, &cuhl,
NULL, NULL, NULL) != 0)
break;
/* Get the DIE(Debugging Information Entry) of this CU */
diep = dwarf_offdie(dbg->dbg, off + cuhl, &lf.cu_die);
if (!diep) {
off = noff;
continue;
}
/* Check if target file is included. */
if (lr->file)
lf.fname = cu_find_realpath(&lf.cu_die, lr->file);
else
lf.fname = 0;
if (!lr->file || lf.fname) {
if (lr->function)
ret = find_line_range_by_func(&lf);
else {
lf.lno_s = lr->start;
lf.lno_e = lr->end;
ret = find_line_range_by_line(NULL, &lf);
}
}
off = noff;
}
found:
/* Store comp_dir */
if (lf.found) {
comp_dir = cu_get_comp_dir(&lf.cu_die);
if (comp_dir) {
lr->comp_dir = strdup(comp_dir);
if (!lr->comp_dir)
ret = -ENOMEM;
}
}
pr_debug("path: %s\n", lr->path);
return (ret < 0) ? ret : lf.found;
}
#ifdef HAVE_DEBUGINFOD_SUPPORT
/* debuginfod doesn't require the comp_dir, but a build-id is required */
static int get_source_from_debuginfod(const char *raw_path,
const char *sbuild_id, char **new_path)
{
debuginfod_client *c = debuginfod_begin();
const char *p = raw_path;
int fd;
if (!c)
return -ENOMEM;
fd = debuginfod_find_source(c, (const unsigned char *)sbuild_id,
0, p, new_path);
pr_debug("Search %s from debuginfod -> %d\n", p, fd);
if (fd >= 0)
close(fd);
debuginfod_end(c);
if (fd < 0) {
pr_debug("Failed to find %s in debuginfod (%s)\n",
raw_path, sbuild_id);
return -ENOENT;
}
pr_debug("Got a source %s\n", *new_path);
return 0;
}
#else
static inline int get_source_from_debuginfod(const char *raw_path __maybe_unused,
const char *sbuild_id __maybe_unused,
char **new_path __maybe_unused)
{
return -ENOTSUP;
}
#endif
/*
* Find a src file from a DWARF tag path. Prepend optional source path prefix
* and chop off leading directories that do not exist. Result is passed back as
* a newly allocated path on success.
* Return 0 if file was found and readable, -errno otherwise.
*/
int find_source_path(const char *raw_path, const char *sbuild_id,
const char *comp_dir, char **new_path)
{
const char *prefix = symbol_conf.source_prefix;
if (sbuild_id && !prefix) {
if (!get_source_from_debuginfod(raw_path, sbuild_id, new_path))
return 0;
}
if (!prefix) {
if (raw_path[0] != '/' && comp_dir)
/* If not an absolute path, try to use comp_dir */
prefix = comp_dir;
else {
if (access(raw_path, R_OK) == 0) {
*new_path = strdup(raw_path);
return *new_path ? 0 : -ENOMEM;
} else
return -errno;
}
}
*new_path = malloc((strlen(prefix) + strlen(raw_path) + 2));
if (!*new_path)
return -ENOMEM;
for (;;) {
sprintf(*new_path, "%s/%s", prefix, raw_path);
if (access(*new_path, R_OK) == 0)
return 0;
if (!symbol_conf.source_prefix) {
/* In case of searching comp_dir, don't retry */
zfree(new_path);
return -errno;
}
switch (errno) {
case ENAMETOOLONG:
case ENOENT:
case EROFS:
case EFAULT:
raw_path = strchr(++raw_path, '/');
if (!raw_path) {
zfree(new_path);
return -ENOENT;
}
continue;
default:
zfree(new_path);
return -errno;
}
}
}
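#if 0
/*
 * Illustrative sketch (not compiled): a hypothetical caller resolving a
 * relative DWARF source path.  With no symbol_conf.source_prefix set, a
 * relative raw_path is simply prepended with the CU's comp_dir; with a prefix
 * set, the loop above additionally strips leading directories from raw_path
 * until a readable candidate is found.  The file name and comp_dir below are
 * made-up examples.
 */
static void example_resolve_source(void)
{
	char *path = NULL;

	if (find_source_path("kernel/sched/core.c", /*sbuild_id=*/NULL,
			     "/usr/src/linux", &path) == 0) {
		pr_debug("resolved source file: %s\n", path);
		free(path);
	}
}
#endif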
| linux-master | tools/perf/util/probe-finder.c |
// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
#include "callchain.h"
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)
{
RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
}
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
{
const char *arch;
enum dso_type dso_type;
struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
struct dso *dso = map__dso(map);
struct machine *machine;
int err;
if (!dwarf_callchain_users)
return 0;
if (maps__addr_space(maps)) {
pr_debug("unwind: thread map already set, dso=%s\n", dso->name);
if (initialized)
*initialized = true;
return 0;
}
machine = maps__machine(maps);
/* env->arch is NULL for live-mode (i.e. perf top) */
if (!machine->env || !machine->env->arch)
goto out_register;
dso_type = dso__type(dso, machine);
if (dso_type == DSO__TYPE_UNKNOWN)
return 0;
arch = perf_env__arch(machine->env);
if (!strcmp(arch, "x86")) {
if (dso_type != DSO__TYPE_64BIT)
ops = x86_32_unwind_libunwind_ops;
} else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
if (dso_type == DSO__TYPE_64BIT)
ops = arm64_unwind_libunwind_ops;
}
if (!ops) {
pr_warning_once("unwind: target platform=%s is not supported\n", arch);
return 0;
}
out_register:
unwind__register_ops(maps, ops);
err = maps__unwind_libunwind_ops(maps)->prepare_access(maps);
if (initialized)
*initialized = err ? false : true;
return err;
}
void unwind__flush_access(struct maps *maps)
{
const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);
if (ops)
ops->flush_access(maps);
}
void unwind__finish_access(struct maps *maps)
{
const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);
if (ops)
ops->finish_access(maps);
}
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct thread *thread,
struct perf_sample *data, int max_stack,
bool best_effort)
{
const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(thread__maps(thread));
if (ops)
return ops->get_entries(cb, arg, thread, data, max_stack, best_effort);
return 0;
}
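#if 0
/*
 * Illustrative sketch (not compiled): the expected calling sequence for the
 * API above.  The callback, the stack depth of 127 and the assumption that
 * 'map' already belongs to the thread's maps are all hypothetical.
 */
static int count_entry_cb(struct unwind_entry *entry __maybe_unused, void *arg)
{
	(*(int *)arg)++;
	return 0;	/* a non-zero return stops the unwind */
}

static int unwind_one_sample(struct thread *thread, struct map *map,
			     struct perf_sample *sample)
{
	bool initialized = false;
	int depth = 0;
	int err;

	err = unwind__prepare_access(thread__maps(thread), map, &initialized);
	if (err < 0)
		return err;
	err = unwind__get_entries(count_entry_cb, &depth, thread, sample,
				  /*max_stack=*/127, /*best_effort=*/false);
	return err < 0 ? err : depth;
}
#endif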
| linux-master | tools/perf/util/unwind-libunwind.c |
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include "annotate.h"
#include "build-id.h"
#include "cap.h"
#include "dso.h"
#include "util.h" // lsdir()
#include "debug.h"
#include "event.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "map_symbol.h"
#include "mem-events.h"
#include "symsrc.h"
#include "strlist.h"
#include "intlist.h"
#include "namespaces.h"
#include "header.h"
#include "path.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include <elf.h>
#include <limits.h>
#include <symbol/kallsyms.h>
#include <sys/utsname.h>
static int dso__load_kernel_sym(struct dso *dso, struct map *map);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
static bool symbol__is_idle(const char *name);
int vmlinux_path__nr_entries;
char **vmlinux_path;
struct map_list_node {
struct list_head node;
struct map *map;
};
struct symbol_conf symbol_conf = {
.nanosecs = false,
.use_modules = true,
.try_vmlinux_path = true,
.demangle = true,
.demangle_kernel = false,
.cumulate_callchain = true,
.time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
.show_hist_headers = true,
.symfs = "",
.event_group = true,
.inline_name = true,
.res_sample = 0,
};
static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__KALLSYMS,
DSO_BINARY_TYPE__GUEST_KALLSYMS,
DSO_BINARY_TYPE__JAVA_JIT,
DSO_BINARY_TYPE__DEBUGLINK,
DSO_BINARY_TYPE__BUILD_ID_CACHE,
DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__GUEST_KMODULE,
DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
static struct map_list_node *map_list_node__new(void)
{
return malloc(sizeof(struct map_list_node));
}
static bool symbol_type__filter(char symbol_type)
{
symbol_type = toupper(symbol_type);
return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
}
static int prefix_underscores_count(const char *str)
{
const char *tail = str;
while (*tail == '_')
tail++;
return tail - str;
}
const char * __weak arch__normalize_symbol_name(const char *name)
{
return name;
}
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
return strcmp(namea, nameb);
}
int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
unsigned int n)
{
return strncmp(namea, nameb, n);
}
int __weak arch__choose_best_symbol(struct symbol *syma,
struct symbol *symb __maybe_unused)
{
/* Avoid "SyS" kernel syscall aliases */
if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
return SYMBOL_B;
if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
return SYMBOL_B;
return SYMBOL_A;
}
static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
s64 a;
s64 b;
size_t na, nb;
/* Prefer a symbol with non-zero length */
a = syma->end - syma->start;
b = symb->end - symb->start;
if ((b == 0) && (a > 0))
return SYMBOL_A;
else if ((a == 0) && (b > 0))
return SYMBOL_B;
/* Prefer a non weak symbol over a weak one */
a = syma->binding == STB_WEAK;
b = symb->binding == STB_WEAK;
if (b && !a)
return SYMBOL_A;
if (a && !b)
return SYMBOL_B;
/* Prefer a global symbol over a non global one */
a = syma->binding == STB_GLOBAL;
b = symb->binding == STB_GLOBAL;
if (a && !b)
return SYMBOL_A;
if (b && !a)
return SYMBOL_B;
/* Prefer a symbol with less underscores */
a = prefix_underscores_count(syma->name);
b = prefix_underscores_count(symb->name);
if (b > a)
return SYMBOL_A;
else if (a > b)
return SYMBOL_B;
/* Choose the symbol with the longest name */
na = strlen(syma->name);
nb = strlen(symb->name);
if (na > nb)
return SYMBOL_A;
else if (na < nb)
return SYMBOL_B;
return arch__choose_best_symbol(syma, symb);
}
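#if 0
/*
 * Illustrative sketch (not compiled): the tie-break order above in action on
 * two hypothetical symbols starting at the same address.
 */
static void example_choose_best_symbol(void)
{
	struct symbol *a = symbol__new(0xffffffff81234000, 0x40, STB_WEAK,
				       STT_FUNC, "__memcpy");
	struct symbol *b = symbol__new(0xffffffff81234000, 0x40, STB_GLOBAL,
				       STT_FUNC, "memcpy");

	/* Equal sizes, so the weak/global rules decide: "memcpy" is kept. */
	assert(choose_best_symbol(a, b) == SYMBOL_B);
	symbol__delete(a);
	symbol__delete(b);
}
#endif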
void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
struct rb_node *nd;
struct symbol *curr, *next;
if (symbol_conf.allow_aliases)
return;
nd = rb_first_cached(symbols);
while (nd) {
curr = rb_entry(nd, struct symbol, rb_node);
again:
nd = rb_next(&curr->rb_node);
next = rb_entry(nd, struct symbol, rb_node);
if (!nd)
break;
if (curr->start != next->start)
continue;
if (choose_best_symbol(curr, next) == SYMBOL_A) {
if (next->type == STT_GNU_IFUNC)
curr->ifunc_alias = true;
rb_erase_cached(&next->rb_node, symbols);
symbol__delete(next);
goto again;
} else {
if (curr->type == STT_GNU_IFUNC)
next->ifunc_alias = true;
nd = rb_next(&curr->rb_node);
rb_erase_cached(&curr->rb_node, symbols);
symbol__delete(curr);
}
}
}
/* Update zero-sized symbols using the address of the next symbol */
void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
{
struct rb_node *nd, *prevnd = rb_first_cached(symbols);
struct symbol *curr, *prev;
if (prevnd == NULL)
return;
curr = rb_entry(prevnd, struct symbol, rb_node);
for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
prev = curr;
curr = rb_entry(nd, struct symbol, rb_node);
/*
* On some architectures the kernel text segment start is located at
* some low memory address, while modules are located at high
* memory addresses (or vice versa). The gap between end of
* kernel text segment and beginning of first module's text
* segment is very big. Therefore do not fill this gap and do
* not assign it to the kernel dso map (kallsyms).
*
* In kallsyms, module symbols are identified by the '[' character,
* as in:
* ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
*/
if (prev->end == prev->start) {
/* Last kernel/module symbol mapped to end of page */
if (is_kallsyms && (!strchr(prev->name, '[') !=
!strchr(curr->name, '[')))
prev->end = roundup(prev->end + 4096, 4096);
else
prev->end = curr->start;
pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
__func__, prev->name, prev->end);
}
}
/* Last entry */
if (curr->end == curr->start)
curr->end = roundup(curr->start, 4096) + 4096;
}
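/*
 * Worked example of the kallsyms boundary handling above (addresses are
 * hypothetical):
 *
 *   ffffffff81000000 T _stext             -> end := next symbol's start
 *   ffffffff81000040 T do_one_initcall    -> next symbol is a module symbol
 *   ffffffffc0100000 T foo_init [foo]        (the '[' markers differ), so the
 *                                            end of do_one_initcall is bumped
 *                                            to the following page boundary
 *                                            instead of being stretched across
 *                                            the gap to the module.
 */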
void maps__fixup_end(struct maps *maps)
{
struct map_rb_node *prev = NULL, *curr;
down_write(maps__lock(maps));
maps__for_each_entry(maps, curr) {
if (prev != NULL && !map__end(prev->map))
map__set_end(prev->map, map__start(curr->map));
prev = curr;
}
/*
* We still don't have the actual symbols, so guess the
* last map's final address.
*/
if (curr && !map__end(curr->map))
map__set_end(curr->map, ~0ULL);
up_write(maps__lock(maps));
}
struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
size_t namelen = strlen(name) + 1;
struct symbol *sym = calloc(1, (symbol_conf.priv_size +
sizeof(*sym) + namelen));
if (sym == NULL)
return NULL;
if (symbol_conf.priv_size) {
if (symbol_conf.init_annotation) {
struct annotation *notes = (void *)sym;
annotation__init(notes);
}
sym = ((void *)sym) + symbol_conf.priv_size;
}
sym->start = start;
sym->end = len ? start + len : start;
sym->type = type;
sym->binding = binding;
sym->namelen = namelen - 1;
pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
__func__, name, start, sym->end);
memcpy(sym->name, name, namelen);
return sym;
}
void symbol__delete(struct symbol *sym)
{
if (symbol_conf.priv_size) {
if (symbol_conf.init_annotation) {
struct annotation *notes = symbol__annotation(sym);
annotation__exit(notes);
}
}
free(((void *)sym) - symbol_conf.priv_size);
}
void symbols__delete(struct rb_root_cached *symbols)
{
struct symbol *pos;
struct rb_node *next = rb_first_cached(symbols);
while (next) {
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
rb_erase_cached(&pos->rb_node, symbols);
symbol__delete(pos);
}
}
void __symbols__insert(struct rb_root_cached *symbols,
struct symbol *sym, bool kernel)
{
struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 ip = sym->start;
struct symbol *s;
bool leftmost = true;
if (kernel) {
const char *name = sym->name;
/*
* ppc64 uses function descriptors and appends a '.' to the
* start of every instruction address. Remove it.
*/
if (name[0] == '.')
name++;
sym->idle = symbol__is_idle(name);
}
while (*p != NULL) {
parent = *p;
s = rb_entry(parent, struct symbol, rb_node);
if (ip < s->start)
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&sym->rb_node, parent, p);
rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}
void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
__symbols__insert(symbols, sym, false);
}
static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
struct rb_node *n;
if (symbols == NULL)
return NULL;
n = symbols->rb_root.rb_node;
while (n) {
struct symbol *s = rb_entry(n, struct symbol, rb_node);
if (ip < s->start)
n = n->rb_left;
else if (ip > s->end || (ip == s->end && ip != s->start))
n = n->rb_right;
else
return s;
}
return NULL;
}
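/*
 * Note on the range check above: a symbol covers [start, end), i.e. 'end' is
 * exclusive, except for zero-sized symbols (start == end), which still match a
 * lookup at exactly their start address.  E.g. a (hypothetical) symbol with
 * start 0x1000 and end 0x1040 matches addresses 0x1000..0x103f.
 */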
static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
struct rb_node *n = rb_first_cached(symbols);
if (n)
return rb_entry(n, struct symbol, rb_node);
return NULL;
}
static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
struct rb_node *n = rb_last(&symbols->rb_root);
if (n)
return rb_entry(n, struct symbol, rb_node);
return NULL;
}
static struct symbol *symbols__next(struct symbol *sym)
{
struct rb_node *n = rb_next(&sym->rb_node);
if (n)
return rb_entry(n, struct symbol, rb_node);
return NULL;
}
static int symbols__sort_name_cmp(const void *vlhs, const void *vrhs)
{
const struct symbol *lhs = *((const struct symbol **)vlhs);
const struct symbol *rhs = *((const struct symbol **)vrhs);
return strcmp(lhs->name, rhs->name);
}
static struct symbol **symbols__sort_by_name(struct rb_root_cached *source, size_t *len)
{
struct rb_node *nd;
struct symbol **result;
size_t i = 0, size = 0;
for (nd = rb_first_cached(source); nd; nd = rb_next(nd))
size++;
result = malloc(sizeof(*result) * size);
if (!result)
return NULL;
for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
result[i++] = pos;
}
qsort(result, size, sizeof(*result), symbols__sort_name_cmp);
*len = size;
return result;
}
int symbol__match_symbol_name(const char *name, const char *str,
enum symbol_tag_include includes)
{
const char *versioning;
if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
(versioning = strstr(name, "@@"))) {
int len = strlen(str);
if (len < versioning - name)
len = versioning - name;
return arch__compare_symbol_names_n(name, str, len);
} else
return arch__compare_symbol_names(name, str);
}
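/*
 * Example (hypothetical version tag): with SYMBOL_TAG_INCLUDE__DEFAULT_ONLY,
 * the query "pthread_create" matches the default-versioned symbol
 * "pthread_create@@GLIBC_2.34", because only the part before "@@" takes part
 * in the comparison.
 */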
static struct symbol *symbols__find_by_name(struct symbol *symbols[],
size_t symbols_len,
const char *name,
enum symbol_tag_include includes,
size_t *found_idx)
{
size_t i, lower = 0, upper = symbols_len;
struct symbol *s = NULL;
if (found_idx)
*found_idx = SIZE_MAX;
if (!symbols_len)
return NULL;
while (lower < upper) {
int cmp;
i = (lower + upper) / 2;
cmp = symbol__match_symbol_name(symbols[i]->name, name, includes);
if (cmp > 0)
upper = i;
else if (cmp < 0)
lower = i + 1;
else {
if (found_idx)
*found_idx = i;
s = symbols[i];
break;
}
}
if (s && includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) {
/* return first symbol that has same name (if any) */
for (; i > 0; i--) {
struct symbol *tmp = symbols[i - 1];
if (!arch__compare_symbol_names(tmp->name, s->name)) {
if (found_idx)
*found_idx = i - 1;
s = tmp;
} else
break;
}
}
assert(!found_idx || !s || s == symbols[*found_idx]);
return s;
}
void dso__reset_find_symbol_cache(struct dso *dso)
{
dso->last_find_result.addr = 0;
dso->last_find_result.symbol = NULL;
}
void dso__insert_symbol(struct dso *dso, struct symbol *sym)
{
__symbols__insert(&dso->symbols, sym, dso->kernel);
/* update the symbol cache if necessary */
if (dso->last_find_result.addr >= sym->start &&
(dso->last_find_result.addr < sym->end ||
sym->start == sym->end)) {
dso->last_find_result.symbol = sym;
}
}
void dso__delete_symbol(struct dso *dso, struct symbol *sym)
{
rb_erase_cached(&sym->rb_node, &dso->symbols);
symbol__delete(sym);
dso__reset_find_symbol_cache(dso);
}
struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
{
if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
dso->last_find_result.addr = addr;
dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
}
return dso->last_find_result.symbol;
}
struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr)
{
return symbols__find(&dso->symbols, addr);
}
struct symbol *dso__first_symbol(struct dso *dso)
{
return symbols__first(&dso->symbols);
}
struct symbol *dso__last_symbol(struct dso *dso)
{
return symbols__last(&dso->symbols);
}
struct symbol *dso__next_symbol(struct symbol *sym)
{
return symbols__next(sym);
}
struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx)
{
if (*idx + 1 >= dso->symbol_names_len)
return NULL;
++*idx;
return dso->symbol_names[*idx];
}
/*
* Returns the first symbol that matches @name.
*/
struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx)
{
struct symbol *s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len,
name, SYMBOL_TAG_INCLUDE__NONE, idx);
if (!s)
s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len,
name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx);
return s;
}
void dso__sort_by_name(struct dso *dso)
{
mutex_lock(&dso->lock);
if (!dso__sorted_by_name(dso)) {
size_t len;
dso->symbol_names = symbols__sort_by_name(&dso->symbols, &len);
if (dso->symbol_names) {
dso->symbol_names_len = len;
dso__set_sorted_by_name(dso);
}
}
mutex_unlock(&dso->lock);
}
/*
* While we find nice hex chars, build a long_val.
* Return number of chars processed.
*/
static int hex2u64(const char *ptr, u64 *long_val)
{
char *p;
*long_val = strtoull(ptr, &p, 16);
return p - ptr;
}
int modules__parse(const char *filename, void *arg,
int (*process_module)(void *arg, const char *name,
u64 start, u64 size))
{
char *line = NULL;
size_t n;
FILE *file;
int err = 0;
file = fopen(filename, "r");
if (file == NULL)
return -1;
while (1) {
char name[PATH_MAX];
u64 start, size;
char *sep, *endptr;
ssize_t line_len;
line_len = getline(&line, &n, file);
if (line_len < 0) {
if (feof(file))
break;
err = -1;
goto out;
}
if (!line) {
err = -1;
goto out;
}
line[--line_len] = '\0'; /* \n */
sep = strrchr(line, 'x');
if (sep == NULL)
continue;
hex2u64(sep + 1, &start);
sep = strchr(line, ' ');
if (sep == NULL)
continue;
*sep = '\0';
scnprintf(name, sizeof(name), "[%s]", line);
size = strtoul(sep + 1, &endptr, 0);
if (*endptr != ' ' && *endptr != '\t')
continue;
err = process_module(arg, name, start, size);
if (err)
break;
}
out:
free(line);
fclose(file);
return err;
}
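/*
 * Example /proc/modules line (name, size and address are illustrative):
 *
 *   snd_hda_codec_hdmi 77824 1 - Live 0xffffffffc1937000
 *
 * strrchr(line, 'x') points into the trailing "0x..." load address, the first
 * space terminates the module name (reported as "[snd_hda_codec_hdmi]"), and
 * the field right after that space is parsed as the module size.
 */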
/*
* These are symbols in the kernel image, so make sure that
* sym is from a kernel DSO.
*/
static bool symbol__is_idle(const char *name)
{
const char * const idle_symbols[] = {
"acpi_idle_do_entry",
"acpi_processor_ffh_cstate_enter",
"arch_cpu_idle",
"cpu_idle",
"cpu_startup_entry",
"idle_cpu",
"intel_idle",
"default_idle",
"native_safe_halt",
"enter_idle",
"exit_idle",
"mwait_idle",
"mwait_idle_with_hints",
"mwait_idle_with_hints.constprop.0",
"poll_idle",
"ppc64_runlatch_off",
"pseries_dedicated_idle_sleep",
"psw_idle",
"psw_idle_exit",
NULL
};
int i;
static struct strlist *idle_symbols_list;
if (idle_symbols_list)
return strlist__has_entry(idle_symbols_list, name);
idle_symbols_list = strlist__new(NULL, NULL);
for (i = 0; idle_symbols[i]; i++)
strlist__add(idle_symbols_list, idle_symbols[i]);
return strlist__has_entry(idle_symbols_list, name);
}
static int map__process_kallsym_symbol(void *arg, const char *name,
char type, u64 start)
{
struct symbol *sym;
struct dso *dso = arg;
struct rb_root_cached *root = &dso->symbols;
if (!symbol_type__filter(type))
return 0;
/* Ignore local symbols for ARM modules */
if (name[0] == '$')
return 0;
/*
* module symbols are not sorted so we add all
* symbols, setting length to 0, and rely on
* symbols__fixup_end() to fix it up.
*/
sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
if (sym == NULL)
return -ENOMEM;
/*
* We will pass the symbols to the filter later, in
* map__split_kallsyms, when we have split the maps per module
*/
__symbols__insert(root, sym, !strchr(name, '['));
return 0;
}
/*
* Loads the function entries in /proc/kallsyms into kernel_map->dso,
* so that we can in the next step set the symbol ->end address and then
* call kernel_maps__split_kallsyms.
*/
static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
{
return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}
static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
{
struct map *curr_map;
struct symbol *pos;
int count = 0;
struct rb_root_cached old_root = dso->symbols;
struct rb_root_cached *root = &dso->symbols;
struct rb_node *next = rb_first_cached(root);
if (!kmaps)
return -1;
*root = RB_ROOT_CACHED;
while (next) {
struct dso *curr_map_dso;
char *module;
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
rb_erase_cached(&pos->rb_node, &old_root);
RB_CLEAR_NODE(&pos->rb_node);
module = strchr(pos->name, '\t');
if (module)
*module = '\0';
curr_map = maps__find(kmaps, pos->start);
if (!curr_map) {
symbol__delete(pos);
continue;
}
curr_map_dso = map__dso(curr_map);
pos->start -= map__start(curr_map) - map__pgoff(curr_map);
if (pos->end > map__end(curr_map))
pos->end = map__end(curr_map);
if (pos->end)
pos->end -= map__start(curr_map) - map__pgoff(curr_map);
symbols__insert(&curr_map_dso->symbols, pos);
++count;
}
/* Symbols have been adjusted */
dso->adjust_symbols = 1;
return count;
}
/*
* Split the symbols into maps, making sure there are no overlaps, i.e. the
* kernel range is broken into several maps, named [kernel].N, as we don't have
* the original ELF section names that vmlinux has.
*/
static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
struct map *initial_map)
{
struct machine *machine;
struct map *curr_map = initial_map;
struct symbol *pos;
int count = 0, moved = 0;
struct rb_root_cached *root = &dso->symbols;
struct rb_node *next = rb_first_cached(root);
int kernel_range = 0;
bool x86_64;
if (!kmaps)
return -1;
machine = maps__machine(kmaps);
x86_64 = machine__is(machine, "x86_64");
while (next) {
char *module;
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
module = strchr(pos->name, '\t');
if (module) {
struct dso *curr_map_dso;
if (!symbol_conf.use_modules)
goto discard_symbol;
*module++ = '\0';
curr_map_dso = map__dso(curr_map);
if (strcmp(curr_map_dso->short_name, module)) {
if (RC_CHK_ACCESS(curr_map) != RC_CHK_ACCESS(initial_map) &&
dso->kernel == DSO_SPACE__KERNEL_GUEST &&
machine__is_default_guest(machine)) {
/*
* We assume all symbols of a module are
* contiguous in kallsyms, so curr_map
* points to a module and all its
* symbols are in its kmap. Mark it as
* loaded.
*/
dso__set_loaded(curr_map_dso);
}
curr_map = maps__find_by_name(kmaps, module);
if (curr_map == NULL) {
pr_debug("%s/proc/{kallsyms,modules} "
"inconsistency while looking "
"for \"%s\" module!\n",
machine->root_dir, module);
curr_map = initial_map;
goto discard_symbol;
}
curr_map_dso = map__dso(curr_map);
if (curr_map_dso->loaded &&
!machine__is_default_guest(machine))
goto discard_symbol;
}
/*
* So that we look just like we get from .ko files,
* i.e. not prelinked, relative to initial_map->start.
*/
pos->start = map__map_ip(curr_map, pos->start);
pos->end = map__map_ip(curr_map, pos->end);
} else if (x86_64 && is_entry_trampoline(pos->name)) {
/*
* These symbols are not needed anymore since the
* trampoline maps refer to the text section and it's
* symbols instead. Avoid having to deal with
* relocations, and the assumption that the first symbol
* is the start of kernel text, by simply removing the
* symbols at this point.
*/
goto discard_symbol;
} else if (curr_map != initial_map) {
char dso_name[PATH_MAX];
struct dso *ndso;
if (delta) {
/* Kernel was relocated at boot time */
pos->start -= delta;
pos->end -= delta;
}
if (count == 0) {
curr_map = initial_map;
goto add_symbol;
}
if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
snprintf(dso_name, sizeof(dso_name),
"[guest.kernel].%d",
kernel_range++);
else
snprintf(dso_name, sizeof(dso_name),
"[kernel].%d",
kernel_range++);
ndso = dso__new(dso_name);
if (ndso == NULL)
return -1;
ndso->kernel = dso->kernel;
curr_map = map__new2(pos->start, ndso);
if (curr_map == NULL) {
dso__put(ndso);
return -1;
}
map__set_map_ip(curr_map, identity__map_ip);
map__set_unmap_ip(curr_map, identity__map_ip);
if (maps__insert(kmaps, curr_map)) {
dso__put(ndso);
return -1;
}
++kernel_range;
} else if (delta) {
/* Kernel was relocated at boot time */
pos->start -= delta;
pos->end -= delta;
}
add_symbol:
if (curr_map != initial_map) {
struct dso *curr_map_dso = map__dso(curr_map);
rb_erase_cached(&pos->rb_node, root);
symbols__insert(&curr_map_dso->symbols, pos);
++moved;
} else
++count;
continue;
discard_symbol:
rb_erase_cached(&pos->rb_node, root);
symbol__delete(pos);
}
if (curr_map != initial_map &&
dso->kernel == DSO_SPACE__KERNEL_GUEST &&
machine__is_default_guest(maps__machine(kmaps))) {
dso__set_loaded(map__dso(curr_map));
}
return count + moved;
}
bool symbol__restricted_filename(const char *filename,
const char *restricted_filename)
{
bool restricted = false;
if (symbol_conf.kptr_restrict) {
char *r = realpath(filename, NULL);
if (r != NULL) {
restricted = strcmp(r, restricted_filename) == 0;
free(r);
return restricted;
}
}
return restricted;
}
struct module_info {
struct rb_node rb_node;
char *name;
u64 start;
};
static void add_module(struct module_info *mi, struct rb_root *modules)
{
struct rb_node **p = &modules->rb_node;
struct rb_node *parent = NULL;
struct module_info *m;
while (*p != NULL) {
parent = *p;
m = rb_entry(parent, struct module_info, rb_node);
if (strcmp(mi->name, m->name) < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&mi->rb_node, parent, p);
rb_insert_color(&mi->rb_node, modules);
}
static void delete_modules(struct rb_root *modules)
{
struct module_info *mi;
struct rb_node *next = rb_first(modules);
while (next) {
mi = rb_entry(next, struct module_info, rb_node);
next = rb_next(&mi->rb_node);
rb_erase(&mi->rb_node, modules);
zfree(&mi->name);
free(mi);
}
}
static struct module_info *find_module(const char *name,
struct rb_root *modules)
{
struct rb_node *n = modules->rb_node;
while (n) {
struct module_info *m;
int cmp;
m = rb_entry(n, struct module_info, rb_node);
cmp = strcmp(name, m->name);
if (cmp < 0)
n = n->rb_left;
else if (cmp > 0)
n = n->rb_right;
else
return m;
}
return NULL;
}
static int __read_proc_modules(void *arg, const char *name, u64 start,
u64 size __maybe_unused)
{
struct rb_root *modules = arg;
struct module_info *mi;
mi = zalloc(sizeof(struct module_info));
if (!mi)
return -ENOMEM;
mi->name = strdup(name);
mi->start = start;
if (!mi->name) {
free(mi);
return -ENOMEM;
}
add_module(mi, modules);
return 0;
}
static int read_proc_modules(const char *filename, struct rb_root *modules)
{
if (symbol__restricted_filename(filename, "/proc/modules"))
return -1;
if (modules__parse(filename, modules, __read_proc_modules)) {
delete_modules(modules);
return -1;
}
return 0;
}
int compare_proc_modules(const char *from, const char *to)
{
struct rb_root from_modules = RB_ROOT;
struct rb_root to_modules = RB_ROOT;
struct rb_node *from_node, *to_node;
struct module_info *from_m, *to_m;
int ret = -1;
if (read_proc_modules(from, &from_modules))
return -1;
if (read_proc_modules(to, &to_modules))
goto out_delete_from;
from_node = rb_first(&from_modules);
to_node = rb_first(&to_modules);
while (from_node) {
if (!to_node)
break;
from_m = rb_entry(from_node, struct module_info, rb_node);
to_m = rb_entry(to_node, struct module_info, rb_node);
if (from_m->start != to_m->start ||
strcmp(from_m->name, to_m->name))
break;
from_node = rb_next(from_node);
to_node = rb_next(to_node);
}
if (!from_node && !to_node)
ret = 0;
delete_modules(&to_modules);
out_delete_from:
delete_modules(&from_modules);
return ret;
}
static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
{
struct rb_root modules = RB_ROOT;
struct map_rb_node *old_node;
int err;
err = read_proc_modules(filename, &modules);
if (err)
return err;
maps__for_each_entry(kmaps, old_node) {
struct map *old_map = old_node->map;
struct module_info *mi;
struct dso *dso;
if (!__map__is_kmodule(old_map)) {
continue;
}
dso = map__dso(old_map);
/* Module must be in memory at the same address */
mi = find_module(dso->short_name, &modules);
if (!mi || mi->start != map__start(old_map)) {
err = -EINVAL;
goto out;
}
}
out:
delete_modules(&modules);
return err;
}
/*
* If kallsyms is referenced by name then we look for filename in the same
* directory.
*/
static bool filename_from_kallsyms_filename(char *filename,
const char *base_name,
const char *kallsyms_filename)
{
char *name;
strcpy(filename, kallsyms_filename);
name = strrchr(filename, '/');
if (!name)
return false;
name += 1;
if (!strcmp(name, "kallsyms")) {
strcpy(name, base_name);
return true;
}
return false;
}
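/*
 * Example (hypothetical path): with kallsyms_filename "/proc/kallsyms" and
 * base_name "modules" this produces "/proc/modules" and returns true; if the
 * basename of kallsyms_filename is not exactly "kallsyms", it returns false.
 */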
static int validate_kcore_modules(const char *kallsyms_filename,
struct map *map)
{
struct maps *kmaps = map__kmaps(map);
char modules_filename[PATH_MAX];
if (!kmaps)
return -EINVAL;
if (!filename_from_kallsyms_filename(modules_filename, "modules",
kallsyms_filename))
return -EINVAL;
if (do_validate_kcore_modules(modules_filename, kmaps))
return -EINVAL;
return 0;
}
static int validate_kcore_addresses(const char *kallsyms_filename,
struct map *map)
{
struct kmap *kmap = map__kmap(map);
if (!kmap)
return -EINVAL;
if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
u64 start;
if (kallsyms__get_function_start(kallsyms_filename,
kmap->ref_reloc_sym->name, &start))
return -ENOENT;
if (start != kmap->ref_reloc_sym->addr)
return -EINVAL;
}
return validate_kcore_modules(kallsyms_filename, map);
}
struct kcore_mapfn_data {
struct dso *dso;
struct list_head maps;
};
static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
struct kcore_mapfn_data *md = data;
struct map_list_node *list_node = map_list_node__new();
if (!list_node)
return -ENOMEM;
list_node->map = map__new2(start, md->dso);
if (!list_node->map) {
free(list_node);
return -ENOMEM;
}
map__set_end(list_node->map, map__start(list_node->map) + len);
map__set_pgoff(list_node->map, pgoff);
list_add(&list_node->node, &md->maps);
return 0;
}
/*
* Merges map into maps by splitting the new map within the existing map
* regions.
*/
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
struct map_rb_node *rb_node;
LIST_HEAD(merged);
int err = 0;
maps__for_each_entry(kmaps, rb_node) {
struct map *old_map = rb_node->map;
/* no overlap with this one */
if (map__end(new_map) < map__start(old_map) ||
map__start(new_map) >= map__end(old_map))
continue;
if (map__start(new_map) < map__start(old_map)) {
/*
* |new......
* |old....
*/
if (map__end(new_map) < map__end(old_map)) {
/*
* |new......| -> |new..|
* |old....| -> |old....|
*/
map__set_end(new_map, map__start(old_map));
} else {
/*
* |new.............| -> |new..| |new..|
* |old....| -> |old....|
*/
struct map_list_node *m = map_list_node__new();
if (!m) {
err = -ENOMEM;
goto out;
}
m->map = map__clone(new_map);
if (!m->map) {
free(m);
err = -ENOMEM;
goto out;
}
map__set_end(m->map, map__start(old_map));
list_add_tail(&m->node, &merged);
map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
map__set_start(new_map, map__end(old_map));
}
} else {
/*
* |new......
* |old....
*/
if (map__end(new_map) < map__end(old_map)) {
/*
* |new..| -> x
* |old.........| -> |old.........|
*/
map__put(new_map);
new_map = NULL;
break;
} else {
/*
* |new......| -> |new...|
* |old....| -> |old....|
*/
map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
map__set_start(new_map, map__end(old_map));
}
}
}
out:
while (!list_empty(&merged)) {
struct map_list_node *old_node;
old_node = list_entry(merged.next, struct map_list_node, node);
list_del_init(&old_node->node);
if (!err)
err = maps__insert(kmaps, old_node->map);
map__put(old_node->map);
free(old_node);
}
if (new_map) {
if (!err)
err = maps__insert(kmaps, new_map);
map__put(new_map);
}
return err;
}
static int dso__load_kcore(struct dso *dso, struct map *map,
const char *kallsyms_filename)
{
struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
struct map *replacement_map = NULL;
struct map_rb_node *old_node, *next;
struct machine *machine;
bool is_64_bit;
int err, fd;
char kcore_filename[PATH_MAX];
u64 stext;
if (!kmaps)
return -EINVAL;
machine = maps__machine(kmaps);
/* This function requires that the map is the kernel map */
if (!__map__is_kernel(map))
return -EINVAL;
if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
kallsyms_filename))
return -EINVAL;
/* Modules and kernel must be present at their original addresses */
if (validate_kcore_addresses(kallsyms_filename, map))
return -EINVAL;
md.dso = dso;
INIT_LIST_HEAD(&md.maps);
fd = open(kcore_filename, O_RDONLY);
if (fd < 0) {
pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
kcore_filename);
return -EINVAL;
}
/* Read new maps into temporary lists */
err = file__read_maps(fd, map__prot(map) & PROT_EXEC, kcore_mapfn, &md,
&is_64_bit);
if (err)
goto out_err;
dso->is_64_bit = is_64_bit;
if (list_empty(&md.maps)) {
err = -EINVAL;
goto out_err;
}
/* Remove old maps */
maps__for_each_entry_safe(kmaps, old_node, next) {
struct map *old_map = old_node->map;
/*
* We need to preserve eBPF maps even if they are
* covered by kcore, because we need to access
* eBPF dso for source data.
*/
if (old_map != map && !__map__is_bpf_prog(old_map))
maps__remove(kmaps, old_map);
}
machine->trampolines_mapped = false;
/* Find the kernel map using the '_stext' symbol */
if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
u64 replacement_size = 0;
struct map_list_node *new_node;
list_for_each_entry(new_node, &md.maps, node) {
struct map *new_map = new_node->map;
u64 new_size = map__size(new_map);
if (!(stext >= map__start(new_map) && stext < map__end(new_map)))
continue;
/*
* On some architectures, ARM64 for example, the kernel
* text can get allocated inside of the vmalloc segment.
* Select the smallest matching segment, in case stext
* falls within more than one in the list.
*/
if (!replacement_map || new_size < replacement_size) {
replacement_map = new_map;
replacement_size = new_size;
}
}
}
if (!replacement_map)
replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
/* Add new maps */
while (!list_empty(&md.maps)) {
struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
struct map *new_map = new_node->map;
list_del_init(&new_node->node);
if (RC_CHK_ACCESS(new_map) == RC_CHK_ACCESS(replacement_map)) {
struct map *map_ref;
map__set_start(map, map__start(new_map));
map__set_end(map, map__end(new_map));
map__set_pgoff(map, map__pgoff(new_map));
map__set_map_ip(map, map__map_ip_ptr(new_map));
map__set_unmap_ip(map, map__unmap_ip_ptr(new_map));
/* Ensure maps are correctly ordered */
map_ref = map__get(map);
maps__remove(kmaps, map_ref);
err = maps__insert(kmaps, map_ref);
map__put(map_ref);
map__put(new_map);
if (err)
goto out_err;
} else {
/*
* Merge kcore map into existing maps,
* and ensure that current maps (eBPF)
* stay intact.
*/
if (maps__merge_in(kmaps, new_map)) {
err = -EINVAL;
goto out_err;
}
}
free(new_node);
}
if (machine__is(machine, "x86_64")) {
u64 addr;
/*
* If one of the corresponding symbols is there, assume the
* entry trampoline maps are too.
*/
if (!kallsyms__get_function_start(kallsyms_filename,
ENTRY_TRAMPOLINE_NAME,
&addr))
machine->trampolines_mapped = true;
}
/*
* Set the data type and long name so that kcore can be read via
* dso__data_read_addr().
*/
if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
else
dso->binary_type = DSO_BINARY_TYPE__KCORE;
dso__set_long_name(dso, strdup(kcore_filename), true);
close(fd);
if (map__prot(map) & PROT_EXEC)
pr_debug("Using %s for kernel object code\n", kcore_filename);
else
pr_debug("Using %s for kernel data\n", kcore_filename);
return 0;
out_err:
while (!list_empty(&md.maps)) {
struct map_list_node *list_node;
list_node = list_entry(md.maps.next, struct map_list_node, node);
list_del_init(&list_node->node);
map__zput(list_node->map);
free(list_node);
}
close(fd);
return err;
}
/*
* If the kernel is relocated at boot time, kallsyms won't match. Compute the
* delta based on the relocation reference symbol.
*/
static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
{
u64 addr;
if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
return 0;
if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
return -1;
*delta = addr - kmap->ref_reloc_sym->addr;
return 0;
}
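/*
 * Worked example (hypothetical addresses): if the reference symbol recorded at
 * perf record time (e.g. "_text") had addr 0xffffffff81000000, but the
 * kallsyms file being loaded now reports it at 0xffffffff9e000000, then
 * *delta = 0x1d000000.  maps__split_kallsyms() later subtracts this delta
 * from every symbol so that addresses match the recorded kernel layout.
 */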
int __dso__load_kallsyms(struct dso *dso, const char *filename,
struct map *map, bool no_kcore)
{
struct kmap *kmap = map__kmap(map);
u64 delta = 0;
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return -1;
if (!kmap || !kmap->kmaps)
return -1;
if (dso__load_all_kallsyms(dso, filename) < 0)
return -1;
if (kallsyms__delta(kmap, filename, &delta))
return -1;
symbols__fixup_end(&dso->symbols, true);
symbols__fixup_duplicate(&dso->symbols);
if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
else
dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
if (!no_kcore && !dso__load_kcore(dso, map, filename))
return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
else
return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
}
int dso__load_kallsyms(struct dso *dso, const char *filename,
struct map *map)
{
return __dso__load_kallsyms(dso, filename, map, false);
}
static int dso__load_perf_map(const char *map_path, struct dso *dso)
{
char *line = NULL;
size_t n;
FILE *file;
int nr_syms = 0;
file = fopen(map_path, "r");
if (file == NULL)
goto out_failure;
while (!feof(file)) {
u64 start, size;
struct symbol *sym;
int line_len, len;
line_len = getline(&line, &n, file);
if (line_len < 0)
break;
if (!line)
goto out_failure;
line[--line_len] = '\0'; /* \n */
len = hex2u64(line, &start);
len++;
if (len + 2 >= line_len)
continue;
len += hex2u64(line + len, &size);
len++;
if (len + 2 >= line_len)
continue;
sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
if (sym == NULL)
goto out_delete_line;
symbols__insert(&dso->symbols, sym);
nr_syms++;
}
free(line);
fclose(file);
return nr_syms;
out_delete_line:
free(line);
out_failure:
return -1;
}
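/*
 * Example /tmp/perf-<pid>.map lines as parsed above (values are illustrative):
 *
 *   7f8b2c000000 1a0 Interpreter::run
 *   7f8b2c000200 80 jit_stub_42
 *
 * i.e. "<start-hex> <size-hex> <symbol name>", one symbol per line.
 */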
#ifdef HAVE_LIBBFD_SUPPORT
#define PACKAGE 'perf'
#include <bfd.h>
static int bfd_symbols__cmpvalue(const void *a, const void *b)
{
const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b;
if (bfd_asymbol_value(as) != bfd_asymbol_value(bs))
return bfd_asymbol_value(as) - bfd_asymbol_value(bs);
return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0];
}
static int bfd2elf_binding(asymbol *symbol)
{
if (symbol->flags & BSF_WEAK)
return STB_WEAK;
if (symbol->flags & BSF_GLOBAL)
return STB_GLOBAL;
if (symbol->flags & BSF_LOCAL)
return STB_LOCAL;
return -1;
}
int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
{
int err = -1;
long symbols_size, symbols_count, i;
asection *section;
asymbol **symbols, *sym;
struct symbol *symbol;
bfd *abfd;
u64 start, len;
abfd = bfd_openr(debugfile, NULL);
if (!abfd)
return -1;
if (!bfd_check_format(abfd, bfd_object)) {
pr_debug2("%s: cannot read %s bfd file.\n", __func__,
dso->long_name);
goto out_close;
}
if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
goto out_close;
symbols_size = bfd_get_symtab_upper_bound(abfd);
if (symbols_size == 0) {
bfd_close(abfd);
return 0;
}
if (symbols_size < 0)
goto out_close;
symbols = malloc(symbols_size);
if (!symbols)
goto out_close;
symbols_count = bfd_canonicalize_symtab(abfd, symbols);
if (symbols_count < 0)
goto out_free;
section = bfd_get_section_by_name(abfd, ".text");
if (section) {
for (i = 0; i < symbols_count; ++i) {
if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") ||
!strcmp(bfd_asymbol_name(symbols[i]), "__image_base__"))
break;
}
if (i < symbols_count) {
/* PE symbols can only have 4 bytes, so use .text high bits */
dso->text_offset = section->vma - (u32)section->vma;
dso->text_offset += (u32)bfd_asymbol_value(symbols[i]);
} else {
dso->text_offset = section->vma - section->filepos;
}
}
qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);
#ifdef bfd_get_section
#define bfd_asymbol_section bfd_get_section
#endif
for (i = 0; i < symbols_count; ++i) {
sym = symbols[i];
section = bfd_asymbol_section(sym);
if (bfd2elf_binding(sym) < 0)
continue;
while (i + 1 < symbols_count &&
bfd_asymbol_section(symbols[i + 1]) == section &&
bfd2elf_binding(symbols[i + 1]) < 0)
i++;
if (i + 1 < symbols_count &&
bfd_asymbol_section(symbols[i + 1]) == section)
len = symbols[i + 1]->value - sym->value;
else
len = section->size - sym->value;
start = bfd_asymbol_value(sym) - dso->text_offset;
symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
bfd_asymbol_name(sym));
if (!symbol)
goto out_free;
symbols__insert(&dso->symbols, symbol);
}
#ifdef bfd_get_section
#undef bfd_asymbol_section
#endif
symbols__fixup_end(&dso->symbols, false);
symbols__fixup_duplicate(&dso->symbols);
dso->adjust_symbols = 1;
err = 0;
out_free:
free(symbols);
out_close:
bfd_close(abfd);
return err;
}
#endif
static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
enum dso_binary_type type)
{
switch (type) {
case DSO_BINARY_TYPE__JAVA_JIT:
case DSO_BINARY_TYPE__DEBUGLINK:
case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
return !kmod && dso->kernel == DSO_SPACE__USER;
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__KCORE:
return dso->kernel == DSO_SPACE__KERNEL;
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__GUEST_KCORE:
return dso->kernel == DSO_SPACE__KERNEL_GUEST;
case DSO_BINARY_TYPE__GUEST_KMODULE:
case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
/*
* kernel modules know their symtab type - it's set when
* creating a module dso in machine__addnew_module_map().
*/
return kmod && dso->symtab_type == type;
case DSO_BINARY_TYPE__BUILD_ID_CACHE:
case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
return true;
case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__BPF_IMAGE:
case DSO_BINARY_TYPE__OOL:
case DSO_BINARY_TYPE__NOT_FOUND:
default:
return false;
}
}
/* Checks for the existence of the perf-<pid>.map file in two different
* locations. First, if the process is in a separate mount namespace, check in
* that namespace using the pid of the innermost pid namespace. If it's not in a
* namespace, or the file can't be found there, try in the mount namespace of
* the tracing process using our view of its pid.
*/
static int dso__find_perf_map(char *filebuf, size_t bufsz,
struct nsinfo **nsip)
{
struct nscookie nsc;
struct nsinfo *nsi;
struct nsinfo *nnsi;
int rc = -1;
nsi = *nsip;
if (nsinfo__need_setns(nsi)) {
snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__nstgid(nsi));
nsinfo__mountns_enter(nsi, &nsc);
rc = access(filebuf, R_OK);
nsinfo__mountns_exit(&nsc);
if (rc == 0)
return rc;
}
nnsi = nsinfo__copy(nsi);
if (nnsi) {
nsinfo__put(nsi);
nsinfo__clear_need_setns(nnsi);
snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__tgid(nnsi));
*nsip = nnsi;
rc = 0;
}
return rc;
}
int dso__load(struct dso *dso, struct map *map)
{
char *name;
int ret = -1;
u_int i;
struct machine *machine = NULL;
char *root_dir = (char *) "";
int ss_pos = 0;
struct symsrc ss_[2];
struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
bool kmod;
bool perfmap;
struct build_id bid;
struct nscookie nsc;
char newmapname[PATH_MAX];
const char *map_path = dso->long_name;
mutex_lock(&dso->lock);
perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
if (perfmap) {
if (dso->nsinfo && (dso__find_perf_map(newmapname,
sizeof(newmapname), &dso->nsinfo) == 0)) {
map_path = newmapname;
}
}
nsinfo__mountns_enter(dso->nsinfo, &nsc);
/* check again under the dso->lock */
if (dso__loaded(dso)) {
ret = 1;
goto out;
}
kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
if (dso->kernel && !kmod) {
if (dso->kernel == DSO_SPACE__KERNEL)
ret = dso__load_kernel_sym(dso, map);
else if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
ret = dso__load_guest_kernel_sym(dso, map);
machine = maps__machine(map__kmaps(map));
if (machine__is(machine, "x86_64"))
machine__map_x86_64_entry_trampolines(machine, dso);
goto out;
}
dso->adjust_symbols = 0;
if (perfmap) {
ret = dso__load_perf_map(map_path, dso);
dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
DSO_BINARY_TYPE__NOT_FOUND;
goto out;
}
if (machine)
root_dir = machine->root_dir;
name = malloc(PATH_MAX);
if (!name)
goto out;
/*
* Read the build id if possible. This is required for
* DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
*/
if (!dso->has_build_id &&
is_regular_file(dso->long_name)) {
__symbol__join_symfs(name, PATH_MAX, dso->long_name);
if (filename__read_build_id(name, &bid) > 0)
dso__set_build_id(dso, &bid);
}
/*
* Iterate over candidate debug images.
* Keep track of "interesting" ones (those which have a symtab, dynsym,
* and/or opd section) for processing.
*/
for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
struct symsrc *ss = &ss_[ss_pos];
bool next_slot = false;
bool is_reg;
bool nsexit;
int bfdrc = -1;
int sirc = -1;
enum dso_binary_type symtab_type = binary_type_symtab[i];
nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);
if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
continue;
if (dso__read_binary_type_filename(dso, symtab_type,
root_dir, name, PATH_MAX))
continue;
if (nsexit)
nsinfo__mountns_exit(&nsc);
is_reg = is_regular_file(name);
if (!is_reg && errno == ENOENT && dso->nsinfo) {
char *new_name = dso__filename_with_chroot(dso, name);
if (new_name) {
is_reg = is_regular_file(new_name);
strlcpy(name, new_name, PATH_MAX);
free(new_name);
}
}
#ifdef HAVE_LIBBFD_SUPPORT
if (is_reg)
bfdrc = dso__load_bfd_symbols(dso, name);
#endif
if (is_reg && bfdrc < 0)
sirc = symsrc__init(ss, dso, name, symtab_type);
if (nsexit)
nsinfo__mountns_enter(dso->nsinfo, &nsc);
if (bfdrc == 0) {
ret = 0;
break;
}
if (!is_reg || sirc < 0)
continue;
if (!syms_ss && symsrc__has_symtab(ss)) {
syms_ss = ss;
next_slot = true;
if (!dso->symsrc_filename)
dso->symsrc_filename = strdup(name);
}
if (!runtime_ss && symsrc__possibly_runtime(ss)) {
runtime_ss = ss;
next_slot = true;
}
if (next_slot) {
ss_pos++;
if (syms_ss && runtime_ss)
break;
} else {
symsrc__destroy(ss);
}
}
if (!runtime_ss && !syms_ss)
goto out_free;
if (runtime_ss && !syms_ss) {
syms_ss = runtime_ss;
}
/* We'll have to hope for the best */
if (!runtime_ss && syms_ss)
runtime_ss = syms_ss;
if (syms_ss)
ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
else
ret = -1;
if (ret > 0) {
int nr_plt;
nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
if (nr_plt > 0)
ret += nr_plt;
}
for (; ss_pos > 0; ss_pos--)
symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
free(name);
if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
ret = 0;
out:
dso__set_loaded(dso);
mutex_unlock(&dso->lock);
nsinfo__mountns_exit(&nsc);
return ret;
}
static int map__strcmp(const void *a, const void *b)
{
const struct map *map_a = *(const struct map **)a;
const struct map *map_b = *(const struct map **)b;
const struct dso *dso_a = map__dso(map_a);
const struct dso *dso_b = map__dso(map_b);
int ret = strcmp(dso_a->short_name, dso_b->short_name);
if (ret == 0 && map_a != map_b) {
/*
* Ensure distinct but name equal maps have an order in part to
* aid reference counting.
*/
ret = (int)map__start(map_a) - (int)map__start(map_b);
if (ret == 0)
ret = (int)((intptr_t)map_a - (intptr_t)map_b);
}
return ret;
}
static int map__strcmp_name(const void *name, const void *b)
{
const struct dso *dso = map__dso(*(const struct map **)b);
return strcmp(name, dso->short_name);
}
void __maps__sort_by_name(struct maps *maps)
{
qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
}
static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
{
struct map_rb_node *rb_node;
struct map **maps_by_name = realloc(maps__maps_by_name(maps),
maps__nr_maps(maps) * sizeof(struct map *));
int i = 0;
if (maps_by_name == NULL)
return -1;
up_read(maps__lock(maps));
down_write(maps__lock(maps));
RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);
maps__for_each_entry(maps, rb_node)
maps_by_name[i++] = map__get(rb_node->map);
__maps__sort_by_name(maps);
up_write(maps__lock(maps));
down_read(maps__lock(maps));
return 0;
}
static struct map *__maps__find_by_name(struct maps *maps, const char *name)
{
struct map **mapp;
if (maps__maps_by_name(maps) == NULL &&
map__groups__sort_by_name_from_rbtree(maps))
return NULL;
mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
sizeof(*mapp), map__strcmp_name);
if (mapp)
return *mapp;
return NULL;
}
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
struct map_rb_node *rb_node;
struct map *map;
down_read(maps__lock(maps));
if (RC_CHK_ACCESS(maps)->last_search_by_name) {
const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);
if (strcmp(dso->short_name, name) == 0) {
map = RC_CHK_ACCESS(maps)->last_search_by_name;
goto out_unlock;
}
}
/*
* If we have maps->maps_by_name, then the name isn't in the rbtree,
* as maps->maps_by_name mirrors the rbtree when lookups by name are
* made.
*/
map = __maps__find_by_name(maps, name);
if (map || maps__maps_by_name(maps) != NULL)
goto out_unlock;
/* Fallback to traversing the rbtree... */
maps__for_each_entry(maps, rb_node) {
struct dso *dso;
map = rb_node->map;
dso = map__dso(map);
if (strcmp(dso->short_name, name) == 0) {
RC_CHK_ACCESS(maps)->last_search_by_name = map;
goto out_unlock;
}
}
map = NULL;
out_unlock:
up_read(maps__lock(maps));
return map;
}
int dso__load_vmlinux(struct dso *dso, struct map *map,
const char *vmlinux, bool vmlinux_allocated)
{
int err = -1;
struct symsrc ss;
char symfs_vmlinux[PATH_MAX];
enum dso_binary_type symtab_type;
if (vmlinux[0] == '/')
snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
else
symbol__join_symfs(symfs_vmlinux, vmlinux);
if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
else
symtab_type = DSO_BINARY_TYPE__VMLINUX;
if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
return -1;
/*
* dso__load_sym() may copy 'dso' which will result in the copies having
* an incorrect long name unless we set it here first.
*/
dso__set_long_name(dso, vmlinux, vmlinux_allocated);
if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
else
dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
err = dso__load_sym(dso, map, &ss, &ss, 0);
symsrc__destroy(&ss);
if (err > 0) {
dso__set_loaded(dso);
pr_debug("Using %s for symbols\n", symfs_vmlinux);
}
return err;
}
int dso__load_vmlinux_path(struct dso *dso, struct map *map)
{
int i, err = 0;
char *filename = NULL;
pr_debug("Looking at the vmlinux_path (%d entries long)\n",
vmlinux_path__nr_entries + 1);
for (i = 0; i < vmlinux_path__nr_entries; ++i) {
err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
if (err > 0)
goto out;
}
if (!symbol_conf.ignore_vmlinux_buildid)
filename = dso__build_id_filename(dso, NULL, 0, false);
if (filename != NULL) {
err = dso__load_vmlinux(dso, map, filename, true);
if (err > 0)
goto out;
free(filename);
}
out:
return err;
}
static bool visible_dir_filter(const char *name, struct dirent *d)
{
if (d->d_type != DT_DIR)
return false;
return lsdir_no_dot_filter(name, d);
}
static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
char kallsyms_filename[PATH_MAX];
int ret = -1;
struct strlist *dirs;
struct str_node *nd;
dirs = lsdir(dir, visible_dir_filter);
if (!dirs)
return -1;
strlist__for_each_entry(nd, dirs) {
scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
"%s/%s/kallsyms", dir, nd->s);
if (!validate_kcore_addresses(kallsyms_filename, map)) {
strlcpy(dir, kallsyms_filename, dir_sz);
ret = 0;
break;
}
}
strlist__delete(dirs);
return ret;
}
/*
* Use open(O_RDONLY) to check readability directly instead of access(R_OK)
* since access(R_OK) only checks with the real UID/GID but open() uses the effective
* UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
*/
static bool filename__readable(const char *file)
{
int fd = open(file, O_RDONLY);
if (fd < 0)
return false;
close(fd);
return true;
}
static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
struct build_id bid;
char sbuild_id[SBUILD_ID_SIZE];
bool is_host = false;
char path[PATH_MAX];
if (!dso->has_build_id) {
/*
* Last resort, if we don't have a build-id and couldn't find
* any vmlinux file, try the running kernel kallsyms table.
*/
goto proc_kallsyms;
}
if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0)
is_host = dso__build_id_equal(dso, &bid);
/* Try a fast path for /proc/kallsyms if possible */
if (is_host) {
/*
* Do not check the build-id cache, unless we know we cannot use
* /proc/kcore or module maps don't match to /proc/kallsyms.
* To check readability of /proc/kcore, do not use access(R_OK)
* since /proc/kcore requires CAP_SYS_RAWIO to read and access
* can't check it.
*/
if (filename__readable("/proc/kcore") &&
!validate_kcore_addresses("/proc/kallsyms", map))
goto proc_kallsyms;
}
build_id__sprintf(&dso->bid, sbuild_id);
/* Find kallsyms in build-id cache with kcore */
scnprintf(path, sizeof(path), "%s/%s/%s",
buildid_dir, DSO__NAME_KCORE, sbuild_id);
if (!find_matching_kcore(map, path, sizeof(path)))
return strdup(path);
/* Use current /proc/kallsyms if possible */
if (is_host) {
proc_kallsyms:
return strdup("/proc/kallsyms");
}
/* Finally, find a cache of kallsyms */
if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
pr_err("No kallsyms or vmlinux with build-id %s was found\n",
sbuild_id);
return NULL;
}
return strdup(path);
}
static int dso__load_kernel_sym(struct dso *dso, struct map *map)
{
int err;
const char *kallsyms_filename = NULL;
char *kallsyms_allocated_filename = NULL;
char *filename = NULL;
/*
* Step 1: if the user specified a kallsyms or vmlinux filename, use
* it and only it, reporting errors to the user if it cannot be used.
*
* For instance, try to analyse an ARM perf.data file _without_ a
* build-id, or if the user specifies the wrong path to the right
* vmlinux file, obviously we can't fall back to another vmlinux (an
* x86_64 one, on the machine where analysis is being performed, say),
* or worse, /proc/kallsyms.
*
* If the specified file _has_ a build-id and there is a build-id
* section in the perf.data file, we will still do the expected
* validation in dso__load_vmlinux and will bail out if they don't
* match.
*/
if (symbol_conf.kallsyms_name != NULL) {
kallsyms_filename = symbol_conf.kallsyms_name;
goto do_kallsyms;
}
if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
}
/*
* Before checking on common vmlinux locations, check if it's
* stored as standard build id binary (not kallsyms) under
* .debug cache.
*/
if (!symbol_conf.ignore_vmlinux_buildid)
filename = __dso__build_id_filename(dso, NULL, 0, false, false);
if (filename != NULL) {
err = dso__load_vmlinux(dso, map, filename, true);
if (err > 0)
return err;
free(filename);
}
if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
err = dso__load_vmlinux_path(dso, map);
if (err > 0)
return err;
}
/* do not try local files if a symfs was given */
if (symbol_conf.symfs[0] != 0)
return -1;
kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
if (!kallsyms_allocated_filename)
return -1;
kallsyms_filename = kallsyms_allocated_filename;
do_kallsyms:
err = dso__load_kallsyms(dso, kallsyms_filename, map);
if (err > 0)
pr_debug("Using %s for symbols\n", kallsyms_filename);
free(kallsyms_allocated_filename);
if (err > 0 && !dso__is_kcore(dso)) {
dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
map__fixup_start(map);
map__fixup_end(map);
}
return err;
}
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
int err;
const char *kallsyms_filename;
struct machine *machine = maps__machine(map__kmaps(map));
char path[PATH_MAX];
if (machine->kallsyms_filename) {
kallsyms_filename = machine->kallsyms_filename;
} else if (machine__is_default_guest(machine)) {
/*
* If the user specified a vmlinux filename, use it and only
* it, reporting errors to the user if it cannot be used.
* Otherwise use the guest kallsyms file given on the command line.
*/
if (symbol_conf.default_guest_vmlinux_name != NULL) {
err = dso__load_vmlinux(dso, map,
symbol_conf.default_guest_vmlinux_name,
false);
return err;
}
kallsyms_filename = symbol_conf.default_guest_kallsyms;
if (!kallsyms_filename)
return -1;
} else {
sprintf(path, "%s/proc/kallsyms", machine->root_dir);
kallsyms_filename = path;
}
err = dso__load_kallsyms(dso, kallsyms_filename, map);
if (err > 0)
pr_debug("Using %s for symbols\n", kallsyms_filename);
if (err > 0 && !dso__is_kcore(dso)) {
dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
dso__set_long_name(dso, machine->mmap_name, false);
map__fixup_start(map);
map__fixup_end(map);
}
return err;
}
static void vmlinux_path__exit(void)
{
while (--vmlinux_path__nr_entries >= 0)
zfree(&vmlinux_path[vmlinux_path__nr_entries]);
vmlinux_path__nr_entries = 0;
zfree(&vmlinux_path);
}
static const char * const vmlinux_paths[] = {
"vmlinux",
"/boot/vmlinux"
};
static const char * const vmlinux_paths_upd[] = {
"/boot/vmlinux-%s",
"/usr/lib/debug/boot/vmlinux-%s",
"/lib/modules/%s/build/vmlinux",
"/usr/lib/debug/lib/modules/%s/vmlinux",
"/usr/lib/debug/boot/vmlinux-%s.debug"
};
static int vmlinux_path__add(const char *new_entry)
{
vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
return -1;
++vmlinux_path__nr_entries;
return 0;
}
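/*
* Build the vmlinux_path[] candidate list used by dso__load_vmlinux_path():
* the fixed names from vmlinux_paths[] plus, unless a symfs was given, the
* per-release templates from vmlinux_paths_upd[] with "%s" expanded to the
* kernel release taken from the perf.data header (env->os_release) or, for
* live sessions, from uname(). For example (illustrative release string),
* "/lib/modules/%s/build/vmlinux" becomes "/lib/modules/6.1.0/build/vmlinux".
*/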
static int vmlinux_path__init(struct perf_env *env)
{
struct utsname uts;
char bf[PATH_MAX];
char *kernel_version;
unsigned int i;
vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
ARRAY_SIZE(vmlinux_paths_upd)));
if (vmlinux_path == NULL)
return -1;
for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
if (vmlinux_path__add(vmlinux_paths[i]) < 0)
goto out_fail;
/* only try kernel version if no symfs was given */
if (symbol_conf.symfs[0] != 0)
return 0;
if (env) {
kernel_version = env->os_release;
} else {
if (uname(&uts) < 0)
goto out_fail;
kernel_version = uts.release;
}
for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
if (vmlinux_path__add(bf) < 0)
goto out_fail;
}
return 0;
out_fail:
vmlinux_path__exit();
return -1;
}
int setup_list(struct strlist **list, const char *list_str,
const char *list_name)
{
if (list_str == NULL)
return 0;
*list = strlist__new(list_str, NULL);
if (!*list) {
pr_err("problems parsing %s list\n", list_name);
return -1;
}
symbol_conf.has_filter = true;
return 0;
}
int setup_intlist(struct intlist **list, const char *list_str,
const char *list_name)
{
if (list_str == NULL)
return 0;
*list = intlist__new(list_str);
if (!*list) {
pr_err("problems parsing %s list\n", list_name);
return -1;
}
return 0;
}
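/*
* Split the symbol filter list: entries that parse as plain hex addresses
* are moved from sym_list into a new intlist pointed to by *addr_list;
* anything else (a real symbol name) stays in sym_list. If nothing parsed
* as an address, the intlist is dropped and *addr_list is left NULL.
*/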
static int setup_addrlist(struct intlist **addr_list, struct strlist *sym_list)
{
struct str_node *pos, *tmp;
unsigned long val;
char *sep;
const char *end;
int i = 0, err;
*addr_list = intlist__new(NULL);
if (!*addr_list)
return -1;
strlist__for_each_entry_safe(pos, tmp, sym_list) {
errno = 0;
val = strtoul(pos->s, &sep, 16);
if (errno || (sep == pos->s))
continue;
if (*sep != '\0') {
end = pos->s + strlen(pos->s) - 1;
while (end >= sep && isspace(*end))
end--;
if (end >= sep)
continue;
}
err = intlist__add(*addr_list, val);
if (err)
break;
strlist__remove(sym_list, pos);
i++;
}
if (i == 0) {
intlist__delete(*addr_list);
*addr_list = NULL;
}
return 0;
}
static bool symbol__read_kptr_restrict(void)
{
bool value = false;
FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
if (fp != NULL) {
char line[8];
if (fgets(line, sizeof(line), fp) != NULL)
value = perf_cap__capable(CAP_SYSLOG) ?
(atoi(line) >= 2) :
(atoi(line) != 0);
fclose(fp);
}
/* Per kernel/kallsyms.c:
* we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
*/
if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
value = true;
return value;
}
int symbol__annotation_init(void)
{
if (symbol_conf.init_annotation)
return 0;
if (symbol_conf.initialized) {
pr_err("Annotation needs to be init before symbol__init()\n");
return -1;
}
symbol_conf.priv_size += sizeof(struct annotation);
symbol_conf.init_annotation = true;
return 0;
}
int symbol__init(struct perf_env *env)
{
const char *symfs;
if (symbol_conf.initialized)
return 0;
symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
symbol__elf_init();
if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
return -1;
if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
pr_err("'.' is the only non valid --field-separator argument\n");
return -1;
}
if (setup_list(&symbol_conf.dso_list,
symbol_conf.dso_list_str, "dso") < 0)
return -1;
if (setup_list(&symbol_conf.comm_list,
symbol_conf.comm_list_str, "comm") < 0)
goto out_free_dso_list;
if (setup_intlist(&symbol_conf.pid_list,
symbol_conf.pid_list_str, "pid") < 0)
goto out_free_comm_list;
if (setup_intlist(&symbol_conf.tid_list,
symbol_conf.tid_list_str, "tid") < 0)
goto out_free_pid_list;
if (setup_list(&symbol_conf.sym_list,
symbol_conf.sym_list_str, "symbol") < 0)
goto out_free_tid_list;
if (symbol_conf.sym_list &&
setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0)
goto out_free_sym_list;
if (setup_list(&symbol_conf.bt_stop_list,
symbol_conf.bt_stop_list_str, "symbol") < 0)
goto out_free_sym_list;
/*
* A path to symbols of "/" is identical to "",
* so reset it here for simplicity.
*/
symfs = realpath(symbol_conf.symfs, NULL);
if (symfs == NULL)
symfs = symbol_conf.symfs;
if (strcmp(symfs, "/") == 0)
symbol_conf.symfs = "";
if (symfs != symbol_conf.symfs)
free((void *)symfs);
symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
symbol_conf.initialized = true;
return 0;
out_free_sym_list:
strlist__delete(symbol_conf.sym_list);
intlist__delete(symbol_conf.addr_list);
out_free_tid_list:
intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
strlist__delete(symbol_conf.dso_list);
return -1;
}
void symbol__exit(void)
{
if (!symbol_conf.initialized)
return;
strlist__delete(symbol_conf.bt_stop_list);
strlist__delete(symbol_conf.sym_list);
strlist__delete(symbol_conf.dso_list);
strlist__delete(symbol_conf.comm_list);
intlist__delete(symbol_conf.tid_list);
intlist__delete(symbol_conf.pid_list);
intlist__delete(symbol_conf.addr_list);
vmlinux_path__exit();
symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
symbol_conf.bt_stop_list = NULL;
symbol_conf.initialized = false;
}
int symbol__config_symfs(const struct option *opt __maybe_unused,
const char *dir, int unset __maybe_unused)
{
char *bf = NULL;
int ret;
symbol_conf.symfs = strdup(dir);
if (symbol_conf.symfs == NULL)
return -ENOMEM;
/* skip the locally configured cache if a symfs is given, and
* point the buildid dir at symfs/.debug
*/
ret = asprintf(&bf, "%s/%s", dir, ".debug");
if (ret < 0)
return -ENOMEM;
set_buildid_dir(bf);
free(bf);
return 0;
}
struct mem_info *mem_info__get(struct mem_info *mi)
{
if (mi)
refcount_inc(&mi->refcnt);
return mi;
}
void mem_info__put(struct mem_info *mi)
{
if (mi && refcount_dec_and_test(&mi->refcnt))
free(mi);
}
struct mem_info *mem_info__new(void)
{
struct mem_info *mi = zalloc(sizeof(*mi));
if (mi)
refcount_set(&mi->refcnt, 1);
return mi;
}
/*
* Checks that user supplied symbol kernel files are accessible because
* the default mechanism for accessing elf files fails silently. i.e. if
* debug syms for a build ID aren't found perf carries on normally. When
* they are user supplied we should assume that the user doesn't want to
* silently fail.
*/
int symbol__validate_sym_arguments(void)
{
if (symbol_conf.vmlinux_name &&
access(symbol_conf.vmlinux_name, R_OK)) {
pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
return -EINVAL;
}
if (symbol_conf.kallsyms_name &&
access(symbol_conf.kallsyms_name, R_OK)) {
pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
return -EINVAL;
}
return 0;
}
| linux-master | tools/perf/util/symbol.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009, Steven Rostedt <[email protected]>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "debug.h"
#include "trace-event.h"
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <traceevent/event-parse.h>
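/*
* Read one of the common_* fields from the current event's raw data. The
* field's offset and size are looked up once via libtraceevent and cached
* in the caller-provided statics. Note the callers below pass their statics
* in (&size, &offset) order, opposite to the parameter names; this is
* harmless because each caller is self-consistent, but worth knowing when
* reading them.
*/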
static int get_common_field(struct scripting_context *context,
int *offset, int *size, const char *type)
{
struct tep_handle *pevent = context->pevent;
struct tep_event *event;
struct tep_format_field *field;
if (!*size) {
event = tep_get_first_event(pevent);
if (!event)
return 0;
field = tep_find_common_field(event, type);
if (!field)
return 0;
*offset = field->offset;
*size = field->size;
}
return tep_read_number(pevent, context->event_data + *offset, *size);
}
int common_lock_depth(struct scripting_context *context)
{
static int offset;
static int size;
int ret;
ret = get_common_field(context, &size, &offset,
"common_lock_depth");
if (ret < 0)
return -1;
return ret;
}
int common_flags(struct scripting_context *context)
{
static int offset;
static int size;
int ret;
ret = get_common_field(context, &size, &offset,
"common_flags");
if (ret < 0)
return -1;
return ret;
}
int common_pc(struct scripting_context *context)
{
static int offset;
static int size;
int ret;
ret = get_common_field(context, &size, &offset,
"common_preempt_count");
if (ret < 0)
return -1;
return ret;
}
unsigned long long
raw_field_value(struct tep_event *event, const char *name, void *data)
{
struct tep_format_field *field;
unsigned long long val;
field = tep_find_any_field(event, name);
if (!field)
return 0ULL;
tep_read_number_field(field, data, &val);
return val;
}
unsigned long long read_size(struct tep_event *event, void *ptr, int size)
{
return tep_read_number(event->tep, ptr, size);
}
void event_format__fprintf(struct tep_event *event,
int cpu, void *data, int size, FILE *fp)
{
struct tep_record record;
struct trace_seq s;
memset(&record, 0, sizeof(record));
record.cpu = cpu;
record.size = size;
record.data = data;
trace_seq_init(&s);
tep_print_event(event->tep, &s, &record, "%s", TEP_PRINT_INFO);
trace_seq_do_fprintf(&s, fp);
trace_seq_destroy(&s);
}
void event_format__print(struct tep_event *event,
int cpu, void *data, int size)
{
return event_format__fprintf(event, cpu, data, size, stdout);
}
void parse_ftrace_printk(struct tep_handle *pevent,
char *file, unsigned int size __maybe_unused)
{
unsigned long long addr;
char *printk;
char *line;
char *next = NULL;
char *addr_str;
char *fmt = NULL;
line = strtok_r(file, "\n", &next);
while (line) {
addr_str = strtok_r(line, ":", &fmt);
if (!addr_str) {
pr_warning("printk format with empty entry");
break;
}
addr = strtoull(addr_str, NULL, 16);
/* fmt still has a space, skip it */
printk = strdup(fmt+1);
line = strtok_r(NULL, "\n", &next);
tep_register_print_string(pevent, printk, addr);
free(printk);
}
}
void parse_saved_cmdline(struct tep_handle *pevent,
char *file, unsigned int size __maybe_unused)
{
char comm[17]; /* Max comm length in the kernel is 16. */
char *line;
char *next = NULL;
int pid;
line = strtok_r(file, "\n", &next);
while (line) {
if (sscanf(line, "%d %16s", &pid, comm) == 2)
tep_register_comm(pevent, comm, pid);
line = strtok_r(NULL, "\n", &next);
}
}
int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size)
{
return tep_parse_event(pevent, buf, size, "ftrace");
}
int parse_event_file(struct tep_handle *pevent,
char *buf, unsigned long size, char *sys)
{
return tep_parse_event(pevent, buf, size, sys);
}
struct flag {
const char *name;
unsigned long long value;
};
static const struct flag flags[] = {
{ "HI_SOFTIRQ", 0 },
{ "TIMER_SOFTIRQ", 1 },
{ "NET_TX_SOFTIRQ", 2 },
{ "NET_RX_SOFTIRQ", 3 },
{ "BLOCK_SOFTIRQ", 4 },
{ "IRQ_POLL_SOFTIRQ", 5 },
{ "TASKLET_SOFTIRQ", 6 },
{ "SCHED_SOFTIRQ", 7 },
{ "HRTIMER_SOFTIRQ", 8 },
{ "RCU_SOFTIRQ", 9 },
{ "HRTIMER_NORESTART", 0 },
{ "HRTIMER_RESTART", 1 },
};
unsigned long long eval_flag(const char *flag)
{
int i;
/*
* Some flags in the format files do not get converted.
* If the flag is not numeric, see if it is something that
* we already know about.
*/
if (isdigit(flag[0]))
return strtoull(flag, NULL, 0);
for (i = 0; i < (int)(ARRAY_SIZE(flags)); i++)
if (strcmp(flags[i].name, flag) == 0)
return flags[i].value;
return 0;
}
| linux-master | tools/perf/util/trace-event-parse.c |
// SPDX-License-Identifier: GPL-2.0
/*
* config.c
*
* Helper functions for parsing config items.
* Originally copied from GIT source.
*
* Copyright (C) Linus Torvalds, 2005
* Copyright (C) Johannes Schindelin, 2005
*
*/
#include <errno.h>
#include <sys/param.h>
#include "cache.h"
#include "callchain.h"
#include <subcmd/exec-cmd.h>
#include "util/event.h" /* proc_map_timeout */
#include "util/hist.h" /* perf_hist_config */
#include "util/stat.h" /* perf_stat__set_big_num */
#include "util/evsel.h" /* evsel__hw_names, evsel__use_bpf_counters */
#include "util/srcline.h" /* addr2line_timeout_ms */
#include "build-id.h"
#include "debug.h"
#include "config.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/ctype.h>
#define MAXNAME (256)
#define DEBUG_CACHE_DIR ".debug"
char buildid_dir[MAXPATHLEN]; /* root dir for buildid, binary cache */
static FILE *config_file;
static const char *config_file_name;
static int config_linenr;
static int config_file_eof;
static struct perf_config_set *config_set;
const char *config_exclusive_filename;
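/*
* Return the next character of the config file being parsed: "\r\n" is
* folded to '\n', config_linenr is bumped on newlines, and EOF is reported
* as a final '\n' with config_file_eof set.
*/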
static int get_next_char(void)
{
int c;
FILE *f;
c = '\n';
if ((f = config_file) != NULL) {
c = fgetc(f);
if (c == '\r') {
/* DOS like systems */
c = fgetc(f);
if (c != '\n') {
ungetc(c, f);
c = '\r';
}
}
if (c == '\n')
config_linenr++;
if (c == EOF) {
config_file_eof = 1;
c = '\n';
}
}
return c;
}
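/*
* Parse the value part of a "key = value" line into a static buffer:
* ';' and '#' start comments (outside quotes), double quotes protect
* whitespace, runs of unquoted whitespace collapse to a single space,
* the escapes \t, \b, \n, \\ and \" are honoured, and a backslash at the
* end of a line continues the value. Returns NULL on overflow, an
* unterminated quote or an unknown escape sequence.
*/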
static char *parse_value(void)
{
static char value[1024];
int quote = 0, comment = 0, space = 0;
size_t len = 0;
for (;;) {
int c = get_next_char();
if (len >= sizeof(value) - 1)
return NULL;
if (c == '\n') {
if (quote)
return NULL;
value[len] = 0;
return value;
}
if (comment)
continue;
if (isspace(c) && !quote) {
space = 1;
continue;
}
if (!quote) {
if (c == ';' || c == '#') {
comment = 1;
continue;
}
}
if (space) {
if (len)
value[len++] = ' ';
space = 0;
}
if (c == '\\') {
c = get_next_char();
switch (c) {
case '\n':
continue;
case 't':
c = '\t';
break;
case 'b':
c = '\b';
break;
case 'n':
c = '\n';
break;
/* Some characters escape as themselves */
case '\\': case '"':
break;
/* Reject unknown escape sequences */
default:
return NULL;
}
value[len++] = c;
continue;
}
if (c == '"') {
quote = 1-quote;
continue;
}
value[len++] = c;
}
}
static inline int iskeychar(int c)
{
return isalnum(c) || c == '-' || c == '_';
}
static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
{
int c;
char *value;
/* Get the full name */
for (;;) {
c = get_next_char();
if (config_file_eof)
break;
if (!iskeychar(c))
break;
name[len++] = c;
if (len >= MAXNAME)
return -1;
}
name[len] = 0;
while (c == ' ' || c == '\t')
c = get_next_char();
value = NULL;
if (c != '\n') {
if (c != '=')
return -1;
value = parse_value();
if (!value)
return -1;
}
return fn(name, value, data);
}
static int get_extended_base_var(char *name, int baselen, int c)
{
do {
if (c == '\n')
return -1;
c = get_next_char();
} while (isspace(c));
/* We require the format to be '[base "extension"]' */
if (c != '"')
return -1;
name[baselen++] = '.';
for (;;) {
int ch = get_next_char();
if (ch == '\n')
return -1;
if (ch == '"')
break;
if (ch == '\\') {
ch = get_next_char();
if (ch == '\n')
return -1;
}
name[baselen++] = ch;
if (baselen > MAXNAME / 2)
return -1;
}
/* Final ']' */
if (get_next_char() != ']')
return -1;
return baselen;
}
static int get_base_var(char *name)
{
int baselen = 0;
for (;;) {
int c = get_next_char();
if (config_file_eof)
return -1;
if (c == ']')
return baselen;
if (isspace(c))
return get_extended_base_var(name, baselen, c);
if (!iskeychar(c) && c != '.')
return -1;
if (baselen > MAXNAME / 2)
return -1;
name[baselen++] = tolower(c);
}
}
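/*
* Top-level config parser: skip an optional UTF-8 BOM, comments and blank
* space, turn "[section]" or "[section "sub"]" headers into a dotted prefix
* in var[], and let get_value() complete each key so that fn() is invoked
* with ("section.key", value, data). For instance a (hypothetical)
* ~/.perfconfig containing
*	[report]
*		children = false
* results in fn("report.children", "false", data). Reaching the pr_err()
* below means a malformed line was hit.
*/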
static int perf_parse_file(config_fn_t fn, void *data)
{
int comment = 0;
int baselen = 0;
static char var[MAXNAME];
/* U+FEFF Byte Order Mark in UTF8 */
static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf";
const unsigned char *bomptr = utf8_bom;
for (;;) {
int line, c = get_next_char();
if (bomptr && *bomptr) {
/* We are at the file beginning; skip UTF8-encoded BOM
* if present. Sane editors won't put this in on their
* own, but e.g. Windows Notepad will do it happily. */
if ((unsigned char) c == *bomptr) {
bomptr++;
continue;
} else {
/* Do not tolerate partial BOM. */
if (bomptr != utf8_bom)
break;
/* No BOM at file beginning. Cool. */
bomptr = NULL;
}
}
if (c == '\n') {
if (config_file_eof)
return 0;
comment = 0;
continue;
}
if (comment || isspace(c))
continue;
if (c == '#' || c == ';') {
comment = 1;
continue;
}
if (c == '[') {
baselen = get_base_var(var);
if (baselen <= 0)
break;
var[baselen++] = '.';
var[baselen] = 0;
continue;
}
if (!isalpha(c))
break;
var[baselen] = tolower(c);
/*
* The get_value function might or might not reach the '\n',
* so save the current line number for error reporting.
*/
line = config_linenr;
if (get_value(fn, data, var, baselen+1) < 0) {
config_linenr = line;
break;
}
}
pr_err("bad config file line %d in %s\n", config_linenr, config_file_name);
return -1;
}
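/*
* Scale helper for numeric config values: an optional, case-insensitive
* "k"/"m"/"g" suffix multiplies by 1024, 1024^2 or 1024^3. Returns 1 on
* success (including no suffix) and 0 for anything else; e.g. "128k"
* yields 128 * 1024 = 131072.
*/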
static int parse_unit_factor(const char *end, unsigned long *val)
{
if (!*end)
return 1;
else if (!strcasecmp(end, "k")) {
*val *= 1024;
return 1;
}
else if (!strcasecmp(end, "m")) {
*val *= 1024 * 1024;
return 1;
}
else if (!strcasecmp(end, "g")) {
*val *= 1024 * 1024 * 1024;
return 1;
}
return 0;
}
static int perf_parse_llong(const char *value, long long *ret)
{
if (value && *value) {
char *end;
long long val = strtoll(value, &end, 0);
unsigned long factor = 1;
if (!parse_unit_factor(end, &factor))
return 0;
*ret = val * factor;
return 1;
}
return 0;
}
static int perf_parse_long(const char *value, long *ret)
{
if (value && *value) {
char *end;
long val = strtol(value, &end, 0);
unsigned long factor = 1;
if (!parse_unit_factor(end, &factor))
return 0;
*ret = val * factor;
return 1;
}
return 0;
}
static void bad_config(const char *name)
{
if (config_file_name)
pr_warning("bad config value for '%s' in %s, ignoring...\n", name, config_file_name);
else
pr_warning("bad config value for '%s', ignoring...\n", name);
}
int perf_config_u64(u64 *dest, const char *name, const char *value)
{
long long ret = 0;
if (!perf_parse_llong(value, &ret)) {
bad_config(name);
return -1;
}
*dest = ret;
return 0;
}
int perf_config_int(int *dest, const char *name, const char *value)
{
long ret = 0;
if (!perf_parse_long(value, &ret)) {
bad_config(name);
return -1;
}
*dest = ret;
return 0;
}
int perf_config_u8(u8 *dest, const char *name, const char *value)
{
long ret = 0;
if (!perf_parse_long(value, &ret)) {
bad_config(name);
return -1;
}
*dest = ret;
return 0;
}
static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
{
int ret;
*is_bool = 1;
if (!value)
return 1;
if (!*value)
return 0;
if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on"))
return 1;
if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off"))
return 0;
*is_bool = 0;
return perf_config_int(&ret, name, value) < 0 ? -1 : ret;
}
int perf_config_bool(const char *name, const char *value)
{
int discard;
return !!perf_config_bool_or_int(name, value, &discard);
}
static const char *perf_config_dirname(const char *name, const char *value)
{
if (!name)
return NULL;
return value;
}
static int perf_buildid_config(const char *var, const char *value)
{
/* same dir for all commands */
if (!strcmp(var, "buildid.dir")) {
const char *dir = perf_config_dirname(var, value);
if (!dir) {
pr_err("Invalid buildid directory!\n");
return -1;
}
strncpy(buildid_dir, dir, MAXPATHLEN-1);
buildid_dir[MAXPATHLEN-1] = '\0';
}
return 0;
}
static int perf_default_core_config(const char *var, const char *value)
{
if (!strcmp(var, "core.proc-map-timeout"))
proc_map_timeout = strtoul(value, NULL, 10);
if (!strcmp(var, "core.addr2line-timeout"))
addr2line_timeout_ms = strtoul(value, NULL, 10);
/* Add other config variables here. */
return 0;
}
static int perf_ui_config(const char *var, const char *value)
{
/* Add other config variables here. */
if (!strcmp(var, "ui.show-headers"))
symbol_conf.show_hist_headers = perf_config_bool(var, value);
return 0;
}
static int perf_stat_config(const char *var, const char *value)
{
if (!strcmp(var, "stat.big-num"))
perf_stat__set_big_num(perf_config_bool(var, value));
if (!strcmp(var, "stat.no-csv-summary"))
perf_stat__set_no_csv_summary(perf_config_bool(var, value));
if (!strcmp(var, "stat.bpf-counter-events"))
evsel__bpf_counter_events = strdup(value);
/* Add other config variables here. */
return 0;
}
int perf_default_config(const char *var, const char *value,
void *dummy __maybe_unused)
{
if (strstarts(var, "core."))
return perf_default_core_config(var, value);
if (strstarts(var, "hist."))
return perf_hist_config(var, value);
if (strstarts(var, "ui."))
return perf_ui_config(var, value);
if (strstarts(var, "call-graph."))
return perf_callchain_config(var, value);
if (strstarts(var, "buildid."))
return perf_buildid_config(var, value);
if (strstarts(var, "stat."))
return perf_stat_config(var, value);
/* Add other config variables here. */
return 0;
}
static int perf_config_from_file(config_fn_t fn, const char *filename, void *data)
{
int ret;
FILE *f = fopen(filename, "r");
ret = -1;
if (f) {
config_file = f;
config_file_name = filename;
config_linenr = 1;
config_file_eof = 0;
ret = perf_parse_file(fn, data);
fclose(f);
config_file_name = NULL;
}
return ret;
}
const char *perf_etc_perfconfig(void)
{
static const char *system_wide;
if (!system_wide)
system_wide = system_path(ETC_PERFCONFIG);
return system_wide;
}
static int perf_env_bool(const char *k, int def)
{
const char *v = getenv(k);
return v ? perf_config_bool(k, v) : def;
}
int perf_config_system(void)
{
return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0);
}
int perf_config_global(void)
{
return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0);
}
static char *home_perfconfig(void)
{
const char *home = NULL;
char *config;
struct stat st;
char path[PATH_MAX];
home = getenv("HOME");
/*
* Skip reading user config if:
* - there is no place to read it from (HOME)
* - we are asked not to (PERF_CONFIG_NOGLOBAL=1)
*/
if (!home || !*home || !perf_config_global())
return NULL;
config = strdup(mkpath(path, sizeof(path), "%s/.perfconfig", home));
if (config == NULL) {
pr_warning("Not enough memory to process %s/.perfconfig, ignoring it.\n", home);
return NULL;
}
if (stat(config, &st) < 0)
goto out_free;
if (st.st_uid && (st.st_uid != geteuid())) {
pr_warning("File %s not owned by current user or root, ignoring it.\n", config);
goto out_free;
}
if (st.st_size)
return config;
out_free:
free(config);
return NULL;
}
const char *perf_home_perfconfig(void)
{
static const char *config;
static bool failed;
if (failed || config)
return config;
config = home_perfconfig();
if (!config)
failed = true;
return config;
}
static struct perf_config_section *find_section(struct list_head *sections,
const char *section_name)
{
struct perf_config_section *section;
list_for_each_entry(section, sections, node)
if (!strcmp(section->name, section_name))
return section;
return NULL;
}
static struct perf_config_item *find_config_item(const char *name,
struct perf_config_section *section)
{
struct perf_config_item *item;
list_for_each_entry(item, &section->items, node)
if (!strcmp(item->name, name))
return item;
return NULL;
}
static struct perf_config_section *add_section(struct list_head *sections,
const char *section_name)
{
struct perf_config_section *section = zalloc(sizeof(*section));
if (!section)
return NULL;
INIT_LIST_HEAD(&section->items);
section->name = strdup(section_name);
if (!section->name) {
pr_debug("%s: strdup failed\n", __func__);
free(section);
return NULL;
}
list_add_tail(&section->node, sections);
return section;
}
static struct perf_config_item *add_config_item(struct perf_config_section *section,
const char *name)
{
struct perf_config_item *item = zalloc(sizeof(*item));
if (!item)
return NULL;
item->name = strdup(name);
if (!item->name) {
pr_debug("%s: strdup failed\n", __func__);
free(item);
return NULL;
}
list_add_tail(&item->node, &section->items);
return item;
}
static int set_value(struct perf_config_item *item, const char *value)
{
char *val = strdup(value);
if (!val)
return -1;
zfree(&item->value);
item->value = val;
return 0;
}
static int collect_config(const char *var, const char *value,
void *perf_config_set)
{
int ret = -1;
char *ptr, *key;
char *section_name, *name;
struct perf_config_section *section = NULL;
struct perf_config_item *item = NULL;
struct perf_config_set *set = perf_config_set;
struct list_head *sections;
if (set == NULL)
return -1;
sections = &set->sections;
key = ptr = strdup(var);
if (!key) {
pr_debug("%s: strdup failed\n", __func__);
return -1;
}
section_name = strsep(&ptr, ".");
name = ptr;
if (name == NULL || value == NULL)
goto out_free;
section = find_section(sections, section_name);
if (!section) {
section = add_section(sections, section_name);
if (!section)
goto out_free;
}
item = find_config_item(name, section);
if (!item) {
item = add_config_item(section, name);
if (!item)
goto out_free;
}
/* perf_config_set can contain both user and system config items,
* so we need to know where each value came from.
* The classification is needed when a particular config file
* is overwritten by the config-setting feature, i.e. set_config().
*/
if (strcmp(config_file_name, perf_etc_perfconfig()) == 0) {
section->from_system_config = true;
item->from_system_config = true;
} else {
section->from_system_config = false;
item->from_system_config = false;
}
ret = set_value(item, value);
out_free:
free(key);
return ret;
}
int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
const char *var, const char *value)
{
config_file_name = file_name;
return collect_config(var, value, set);
}
static int perf_config_set__init(struct perf_config_set *set)
{
int ret = -1;
/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
if (config_exclusive_filename)
return perf_config_from_file(collect_config, config_exclusive_filename, set);
if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) {
if (perf_config_from_file(collect_config, perf_etc_perfconfig(), set) < 0)
goto out;
}
if (perf_config_global() && perf_home_perfconfig()) {
if (perf_config_from_file(collect_config, perf_home_perfconfig(), set) < 0)
goto out;
}
out:
return ret;
}
struct perf_config_set *perf_config_set__new(void)
{
struct perf_config_set *set = zalloc(sizeof(*set));
if (set) {
INIT_LIST_HEAD(&set->sections);
perf_config_set__init(set);
}
return set;
}
struct perf_config_set *perf_config_set__load_file(const char *file)
{
struct perf_config_set *set = zalloc(sizeof(*set));
if (set) {
INIT_LIST_HEAD(&set->sections);
perf_config_from_file(collect_config, file, set);
}
return set;
}
static int perf_config__init(void)
{
if (config_set == NULL)
config_set = perf_config_set__new();
return config_set == NULL;
}
int perf_config_set(struct perf_config_set *set,
config_fn_t fn, void *data)
{
int ret = 0;
char key[BUFSIZ];
struct perf_config_section *section;
struct perf_config_item *item;
perf_config_set__for_each_entry(set, section, item) {
char *value = item->value;
if (value) {
scnprintf(key, sizeof(key), "%s.%s",
section->name, item->name);
ret = fn(key, value, data);
if (ret < 0) {
pr_err("Error in the given config file: wrong config key-value pair %s=%s\n",
key, value);
/*
* Can't be just a 'break', as perf_config_set__for_each_entry()
* expands to two nested for() loops.
*/
goto out;
}
}
}
out:
return ret;
}
int perf_config(config_fn_t fn, void *data)
{
if (config_set == NULL && perf_config__init())
return -1;
return perf_config_set(config_set, fn, data);
}
void perf_config__exit(void)
{
perf_config_set__delete(config_set);
config_set = NULL;
}
void perf_config__refresh(void)
{
perf_config__exit();
perf_config__init();
}
static void perf_config_item__delete(struct perf_config_item *item)
{
zfree(&item->name);
zfree(&item->value);
free(item);
}
static void perf_config_section__purge(struct perf_config_section *section)
{
struct perf_config_item *item, *tmp;
list_for_each_entry_safe(item, tmp, &section->items, node) {
list_del_init(&item->node);
perf_config_item__delete(item);
}
}
static void perf_config_section__delete(struct perf_config_section *section)
{
perf_config_section__purge(section);
zfree(&section->name);
free(section);
}
static void perf_config_set__purge(struct perf_config_set *set)
{
struct perf_config_section *section, *tmp;
list_for_each_entry_safe(section, tmp, &set->sections, node) {
list_del_init(&section->node);
perf_config_section__delete(section);
}
}
void perf_config_set__delete(struct perf_config_set *set)
{
if (set == NULL)
return;
perf_config_set__purge(set);
free(set);
}
/*
* Call this to report error for your variable that should not
* get a boolean value (i.e. "[my] var" means "true").
*/
int config_error_nonbool(const char *var)
{
pr_err("Missing value for '%s'", var);
return -1;
}
void set_buildid_dir(const char *dir)
{
if (dir)
scnprintf(buildid_dir, MAXPATHLEN, "%s", dir);
/* default to $HOME/.debug */
if (buildid_dir[0] == '\0') {
char *home = getenv("HOME");
if (home) {
snprintf(buildid_dir, MAXPATHLEN, "%s/%s",
home, DEBUG_CACHE_DIR);
} else {
strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1);
}
buildid_dir[MAXPATHLEN-1] = '\0';
}
/* for communicating with external commands */
setenv("PERF_BUILDID_DIR", buildid_dir, 1);
}
struct perf_config_scan_data {
const char *name;
const char *fmt;
va_list args;
int ret;
};
static int perf_config_scan_cb(const char *var, const char *value, void *data)
{
struct perf_config_scan_data *d = data;
if (!strcmp(var, d->name))
d->ret = vsscanf(value, d->fmt, d->args);
return 0;
}
int perf_config_scan(const char *name, const char *fmt, ...)
{
struct perf_config_scan_data d = {
.name = name,
.fmt = fmt,
};
va_start(d.args, fmt);
perf_config(perf_config_scan_cb, &d);
va_end(d.args);
return d.ret;
}
| linux-master | tools/perf/util/config.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "namespaces.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "vdso.h"
static inline int is_android_lib(const char *filename)
{
return strstarts(filename, "/data/app-lib/") ||
strstarts(filename, "/system/lib/");
}
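/*
* Map Android target library paths to local copies: /data/app-lib/ entries
* are redirected into $APK_PATH/libs/$APP_ABI/ (or libs/$APP_ABI/), and
* /system/lib/ entries into the matching NDK platform sysroot built from
* $NDK_ROOT, $APP_PLATFORM and the architecture derived from $APP_ABI.
* Returns true if newfilename was filled in.
*/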
static inline bool replace_android_lib(const char *filename, char *newfilename)
{
const char *libname;
char *app_abi;
size_t app_abi_length, new_length;
size_t lib_length = 0;
libname = strrchr(filename, '/');
if (libname)
lib_length = strlen(libname);
app_abi = getenv("APP_ABI");
if (!app_abi)
return false;
app_abi_length = strlen(app_abi);
if (strstarts(filename, "/data/app-lib/")) {
char *apk_path;
if (!app_abi_length)
return false;
new_length = 7 + app_abi_length + lib_length;
apk_path = getenv("APK_PATH");
if (apk_path) {
new_length += strlen(apk_path) + 1;
if (new_length > PATH_MAX)
return false;
snprintf(newfilename, new_length,
"%s/libs/%s/%s", apk_path, app_abi, libname);
} else {
if (new_length > PATH_MAX)
return false;
snprintf(newfilename, new_length,
"libs/%s/%s", app_abi, libname);
}
return true;
}
if (strstarts(filename, "/system/lib/")) {
char *ndk, *app;
const char *arch;
int ndk_length, app_length;
ndk = getenv("NDK_ROOT");
app = getenv("APP_PLATFORM");
if (!(ndk && app))
return false;
ndk_length = strlen(ndk);
app_length = strlen(app);
if (!(ndk_length && app_length && app_abi_length))
return false;
arch = !strncmp(app_abi, "arm", 3) ? "arm" :
!strncmp(app_abi, "mips", 4) ? "mips" :
!strncmp(app_abi, "x86", 3) ? "x86" : NULL;
if (!arch)
return false;
new_length = 27 + ndk_length +
app_length + lib_length
+ strlen(arch);
if (new_length > PATH_MAX)
return false;
snprintf(newfilename, new_length,
"%.*s/platforms/%.*s/arch-%s/usr/lib/%s",
ndk_length, ndk, app_length, app, arch, libname);
return true;
}
return false;
}
void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
{
map__set_start(map, start);
map__set_end(map, end);
map__set_pgoff(map, pgoff);
map__set_reloc(map, 0);
map__set_dso(map, dso__get(dso));
map__set_map_ip(map, map__dso_map_ip);
map__set_unmap_ip(map, map__dso_unmap_ip);
map__set_erange_warned(map, false);
refcount_set(map__refcnt(map), 1);
}
struct map *map__new(struct machine *machine, u64 start, u64 len,
u64 pgoff, struct dso_id *id,
u32 prot, u32 flags, struct build_id *bid,
char *filename, struct thread *thread)
{
struct map *result;
RC_STRUCT(map) *map;
struct nsinfo *nsi = NULL;
struct nsinfo *nnsi;
map = malloc(sizeof(*map));
if (ADD_RC_CHK(result, map)) {
char newfilename[PATH_MAX];
struct dso *dso, *header_bid_dso;
int anon, no_dso, vdso, android;
android = is_android_lib(filename);
anon = is_anon_memory(filename) || flags & MAP_HUGETLB;
vdso = is_vdso_map(filename);
no_dso = is_no_dso_memory(filename);
map->prot = prot;
map->flags = flags;
nsi = nsinfo__get(thread__nsinfo(thread));
if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
snprintf(newfilename, sizeof(newfilename),
"/tmp/perf-%d.map", nsinfo__pid(nsi));
filename = newfilename;
}
if (android) {
if (replace_android_lib(filename, newfilename))
filename = newfilename;
}
if (vdso) {
/* The vdso maps are always on the host and not the
* container. Ensure that we don't use setns to look
* them up.
*/
nnsi = nsinfo__copy(nsi);
if (nnsi) {
nsinfo__put(nsi);
nsinfo__clear_need_setns(nnsi);
nsi = nnsi;
}
pgoff = 0;
dso = machine__findnew_vdso(machine, thread);
} else
dso = machine__findnew_dso_id(machine, filename, id);
if (dso == NULL)
goto out_delete;
map__init(result, start, start + len, pgoff, dso);
if (anon || no_dso) {
map->map_ip = map->unmap_ip = identity__map_ip;
/*
* Set memory without DSO as loaded. All map__find_*
* functions still return NULL, and we avoid the
* unnecessary map__load warning.
*/
if (!(prot & PROT_EXEC))
dso__set_loaded(dso);
}
mutex_lock(&dso->lock);
nsinfo__put(dso->nsinfo);
dso->nsinfo = nsi;
mutex_unlock(&dso->lock);
if (build_id__is_defined(bid)) {
dso__set_build_id(dso, bid);
} else {
/*
* If the mmap event had no build ID, search for an existing dso from the
* build ID header by name. Otherwise only the dso loaded at the time of
* reading the header will have the build ID set and all future mmaps will
* have it missing.
*/
down_read(&machine->dsos.lock);
header_bid_dso = __dsos__find(&machine->dsos, filename, false);
up_read(&machine->dsos.lock);
if (header_bid_dso && header_bid_dso->header_build_id) {
dso__set_build_id(dso, &header_bid_dso->bid);
dso->header_build_id = 1;
}
}
dso__put(dso);
}
return result;
out_delete:
nsinfo__put(nsi);
RC_CHK_FREE(result);
return NULL;
}
/*
* Constructor variant for modules (where we know from /proc/modules where
* they are loaded) and for vmlinux, where only after we load all the
* symbols we'll know where it starts and ends.
*/
struct map *map__new2(u64 start, struct dso *dso)
{
struct map *result;
RC_STRUCT(map) *map;
map = calloc(1, sizeof(*map) + (dso->kernel ? sizeof(struct kmap) : 0));
if (ADD_RC_CHK(result, map)) {
/*
* ->end will be filled after we load all the symbols
*/
map__init(result, start, 0, 0, dso);
}
return result;
}
bool __map__is_kernel(const struct map *map)
{
if (!map__dso(map)->kernel)
return false;
return machine__kernel_map(maps__machine(map__kmaps((struct map *)map))) == map;
}
bool __map__is_extra_kernel_map(const struct map *map)
{
struct kmap *kmap = __map__kmap((struct map *)map);
return kmap && kmap->name[0];
}
bool __map__is_bpf_prog(const struct map *map)
{
const char *name;
struct dso *dso = map__dso(map);
if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
return true;
/*
* If PERF_RECORD_BPF_EVENT is not included, the dso will not have
* the type DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
* guess the type based on the name.
*/
name = dso->short_name;
return name && (strstr(name, "bpf_prog_") == name);
}
bool __map__is_bpf_image(const struct map *map)
{
const char *name;
struct dso *dso = map__dso(map);
if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE)
return true;
/*
* If PERF_RECORD_KSYMBOL is not included, the dso will not have
* the type DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can
* guess the type based on the name.
*/
name = dso->short_name;
return name && is_bpf_image(name);
}
bool __map__is_ool(const struct map *map)
{
const struct dso *dso = map__dso(map);
return dso && dso->binary_type == DSO_BINARY_TYPE__OOL;
}
bool map__has_symbols(const struct map *map)
{
return dso__has_symbols(map__dso(map));
}
static void map__exit(struct map *map)
{
BUG_ON(refcount_read(map__refcnt(map)) != 0);
dso__zput(RC_CHK_ACCESS(map)->dso);
}
void map__delete(struct map *map)
{
map__exit(map);
RC_CHK_FREE(map);
}
void map__put(struct map *map)
{
if (map && refcount_dec_and_test(map__refcnt(map)))
map__delete(map);
else
RC_CHK_PUT(map);
}
void map__fixup_start(struct map *map)
{
struct dso *dso = map__dso(map);
struct rb_root_cached *symbols = &dso->symbols;
struct rb_node *nd = rb_first_cached(symbols);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map__set_start(map, sym->start);
}
}
void map__fixup_end(struct map *map)
{
struct dso *dso = map__dso(map);
struct rb_root_cached *symbols = &dso->symbols;
struct rb_node *nd = rb_last(&symbols->rb_root);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map__set_end(map, sym->end);
}
}
#define DSO__DELETED "(deleted)"
int map__load(struct map *map)
{
struct dso *dso = map__dso(map);
const char *name = dso->long_name;
int nr;
if (dso__loaded(dso))
return 0;
nr = dso__load(dso, map);
if (nr < 0) {
if (dso->has_build_id) {
char sbuild_id[SBUILD_ID_SIZE];
build_id__sprintf(&dso->bid, sbuild_id);
pr_debug("%s with build id %s not found", name, sbuild_id);
} else
pr_debug("Failed to open %s", name);
pr_debug(", continuing without symbols\n");
return -1;
} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
const size_t len = strlen(name);
const size_t real_len = len - sizeof(DSO__DELETED);
if (len > sizeof(DSO__DELETED) &&
strcmp(name + real_len + 1, DSO__DELETED) == 0) {
pr_debug("%.*s was updated (is prelink enabled?). "
"Restart the long running apps that use it!\n",
(int)real_len, name);
} else {
pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
}
#endif
return -1;
}
return 0;
}
struct symbol *map__find_symbol(struct map *map, u64 addr)
{
if (map__load(map) < 0)
return NULL;
return dso__find_symbol(map__dso(map), addr);
}
struct symbol *map__find_symbol_by_name_idx(struct map *map, const char *name, size_t *idx)
{
struct dso *dso;
if (map__load(map) < 0)
return NULL;
dso = map__dso(map);
dso__sort_by_name(dso);
return dso__find_symbol_by_name(dso, name, idx);
}
struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
{
size_t idx;
return map__find_symbol_by_name_idx(map, name, &idx);
}
struct map *map__clone(struct map *from)
{
struct map *result;
RC_STRUCT(map) *map;
size_t size = sizeof(RC_STRUCT(map));
struct dso *dso = map__dso(from);
if (dso && dso->kernel)
size += sizeof(struct kmap);
map = memdup(RC_CHK_ACCESS(from), size);
if (ADD_RC_CHK(result, map)) {
refcount_set(&map->refcnt, 1);
map->dso = dso__get(dso);
}
return result;
}
size_t map__fprintf(struct map *map, FILE *fp)
{
const struct dso *dso = map__dso(map);
return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
map__start(map), map__end(map), map__pgoff(map), dso->name);
}
static bool prefer_dso_long_name(const struct dso *dso, bool print_off)
{
return dso->long_name &&
(symbol_conf.show_kernel_path ||
(print_off && (dso->name[0] == '[' || dso__is_kcore(dso))));
}
static size_t __map__fprintf_dsoname(struct map *map, bool print_off, FILE *fp)
{
char buf[symbol_conf.pad_output_len_dso + 1];
const char *dsoname = "[unknown]";
const struct dso *dso = map ? map__dso(map) : NULL;
if (dso) {
if (prefer_dso_long_name(dso, print_off))
dsoname = dso->long_name;
else
dsoname = dso->name;
}
if (symbol_conf.pad_output_len_dso) {
scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname);
dsoname = buf;
}
return fprintf(fp, "%s", dsoname);
}
size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
return __map__fprintf_dsoname(map, false, fp);
}
size_t map__fprintf_dsoname_dsoff(struct map *map, bool print_off, u64 addr, FILE *fp)
{
const struct dso *dso = map ? map__dso(map) : NULL;
int printed = 0;
if (print_off && (!dso || !dso__is_object_file(dso)))
print_off = false;
printed += fprintf(fp, " (");
printed += __map__fprintf_dsoname(map, print_off, fp);
if (print_off)
printed += fprintf(fp, "+0x%" PRIx64, addr);
printed += fprintf(fp, ")");
return printed;
}
char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
{
if (map == NULL)
return SRCLINE_UNKNOWN;
return get_srcline(map__dso(map), map__rip_2objdump(map, addr), sym, true, true, addr);
}
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
FILE *fp)
{
const struct dso *dso = map ? map__dso(map) : NULL;
int ret = 0;
if (dso) {
char *srcline = map__srcline(map, addr, NULL);
if (srcline != SRCLINE_UNKNOWN)
ret = fprintf(fp, "%s%s", prefix, srcline);
zfree_srcline(&srcline);
}
return ret;
}
void srccode_state_free(struct srccode_state *state)
{
zfree(&state->srcfile);
state->line = 0;
}
/**
* map__rip_2objdump - convert symbol start address to objdump address.
* @map: memory map
* @rip: symbol start address
*
* objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
* map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
* relative to section start.
*
* Return: Address suitable for passing to "objdump --start-address="
*/
u64 map__rip_2objdump(struct map *map, u64 rip)
{
struct kmap *kmap = __map__kmap(map);
const struct dso *dso = map__dso(map);
/*
* vmlinux does not have program headers for PTI entry trampolines and
* kcore may not either. However the trampoline object code is on the
* main kernel map, so just use that instead.
*/
if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps) {
struct machine *machine = maps__machine(kmap->kmaps);
if (machine) {
struct map *kernel_map = machine__kernel_map(machine);
if (kernel_map)
map = kernel_map;
}
}
if (!dso->adjust_symbols)
return rip;
if (dso->rel)
return rip - map__pgoff(map);
/*
* kernel modules also have DSO_SPACE__USER in dso->kernel,
* but all kernel modules are ET_REL, so won't get here.
*/
if (dso->kernel == DSO_SPACE__USER)
return rip + dso->text_offset;
return map__unmap_ip(map, rip) - map__reloc(map);
}
/**
* map__objdump_2mem - convert objdump address to a memory address.
* @map: memory map
* @ip: objdump address
*
* Closely related to map__rip_2objdump(), this function takes an address from
* objdump and converts it to a memory address. Note this assumes that @map
* contains the address. To be sure the result is valid, check it forwards
* e.g. map__rip_2objdump(map__map_ip(map, map__objdump_2mem(map, ip))) == ip
*
* Return: Memory address.
*/
u64 map__objdump_2mem(struct map *map, u64 ip)
{
const struct dso *dso = map__dso(map);
if (!dso->adjust_symbols)
return map__unmap_ip(map, ip);
if (dso->rel)
return map__unmap_ip(map, ip + map__pgoff(map));
/*
* kernel modules also have DSO_SPACE__USER in dso->kernel,
* but all kernel modules are ET_REL, so won't get here.
*/
if (dso->kernel == DSO_SPACE__USER)
return map__unmap_ip(map, ip - dso->text_offset);
return ip + map__reloc(map);
}
bool map__contains_symbol(const struct map *map, const struct symbol *sym)
{
u64 ip = map__unmap_ip(map, sym->start);
return ip >= map__start(map) && ip < map__end(map);
}
struct kmap *__map__kmap(struct map *map)
{
const struct dso *dso = map__dso(map);
if (!dso || !dso->kernel)
return NULL;
return (struct kmap *)(&RC_CHK_ACCESS(map)[1]);
}
struct kmap *map__kmap(struct map *map)
{
struct kmap *kmap = __map__kmap(map);
if (!kmap)
pr_err("Internal error: map__kmap with a non-kernel map\n");
return kmap;
}
struct maps *map__kmaps(struct map *map)
{
struct kmap *kmap = map__kmap(map);
if (!kmap || !kmap->kmaps) {
pr_err("Internal error: map__kmaps with a non-kernel map\n");
return NULL;
}
return kmap->kmaps;
}
u64 map__dso_map_ip(const struct map *map, u64 ip)
{
return ip - map__start(map) + map__pgoff(map);
}
u64 map__dso_unmap_ip(const struct map *map, u64 ip)
{
return ip + map__start(map) - map__pgoff(map);
}
u64 identity__map_ip(const struct map *map __maybe_unused, u64 ip)
{
return ip;
}
| linux-master | tools/perf/util/map.c |
#include <errno.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "env.h"
#include "mem2node.h"
struct phys_entry {
struct rb_node rb_node;
u64 start;
u64 end;
u64 node;
};
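/*
* Insert a physical memory range into the rb-tree, keyed by start address
* (equal keys go right); mem2node__node() below walks the same tree.
*/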
static void phys_entry__insert(struct phys_entry *entry, struct rb_root *root)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct phys_entry *e;
while (*p != NULL) {
parent = *p;
e = rb_entry(parent, struct phys_entry, rb_node);
if (entry->start < e->start)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&entry->rb_node, parent, p);
rb_insert_color(&entry->rb_node, root);
}
static void
phys_entry__init(struct phys_entry *entry, u64 start, u64 bsize, u64 node)
{
entry->start = start;
entry->end = start + bsize;
entry->node = node;
RB_CLEAR_NODE(&entry->rb_node);
}
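/*
* Build the address -> NUMA node map from the memory node info carried in
* struct perf_env: every set bit in a node's bitmap describes a block of
* memory_bsize bytes, adjacent blocks belonging to the same node are merged
* into one phys_entry, and the entries are inserted into the rb-tree so
* mem2node__node() can resolve physical addresses.
*/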
int mem2node__init(struct mem2node *map, struct perf_env *env)
{
struct memory_node *n, *nodes = &env->memory_nodes[0];
struct phys_entry *entries, *tmp_entries;
u64 bsize = env->memory_bsize;
int i, j = 0, max = 0;
memset(map, 0x0, sizeof(*map));
map->root = RB_ROOT;
for (i = 0; i < env->nr_memory_nodes; i++) {
n = &nodes[i];
max += bitmap_weight(n->set, n->size);
}
entries = zalloc(sizeof(*entries) * max);
if (!entries)
return -ENOMEM;
for (i = 0; i < env->nr_memory_nodes; i++) {
u64 bit;
n = &nodes[i];
for (bit = 0; bit < n->size; bit++) {
u64 start;
if (!test_bit(bit, n->set))
continue;
start = bit * bsize;
/*
* Merge nearby areas, we walk in order
* through the bitmap, so no need to sort.
*/
if (j > 0) {
struct phys_entry *prev = &entries[j - 1];
if ((prev->end == start) &&
(prev->node == n->node)) {
prev->end += bsize;
continue;
}
}
phys_entry__init(&entries[j++], start, bsize, n->node);
}
}
/* Cut unused entries, due to merging. */
tmp_entries = realloc(entries, sizeof(*entries) * j);
if (tmp_entries ||
WARN_ONCE(j == 0, "No memory nodes, is CONFIG_MEMORY_HOTPLUG enabled?\n"))
entries = tmp_entries;
for (i = 0; i < j; i++) {
pr_debug("mem2node %03" PRIu64 " [0x%016" PRIx64 "-0x%016" PRIx64 "]\n",
entries[i].node, entries[i].start, entries[i].end);
phys_entry__insert(&entries[i], &map->root);
}
map->entries = entries;
return 0;
}
void mem2node__exit(struct mem2node *map)
{
zfree(&map->entries);
}
int mem2node__node(struct mem2node *map, u64 addr)
{
struct rb_node **p, *parent = NULL;
struct phys_entry *entry;
p = &map->root.rb_node;
while (*p != NULL) {
parent = *p;
entry = rb_entry(parent, struct phys_entry, rb_node);
if (addr < entry->start)
p = &(*p)->rb_left;
else if (addr >= entry->end)
p = &(*p)->rb_right;
else
goto out;
}
entry = NULL;
out:
return entry ? (int) entry->node : -1;
}
| linux-master | tools/perf/util/mem2node.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <stdlib.h>
#include "util/string2.h"
#include "demangle-ocaml.h"
#include <linux/ctype.h>
static const char *caml_prefix = "caml";
static const size_t caml_prefix_len = 4;
/* mangled OCaml symbols start with "caml" followed by an upper-case letter */
static bool
ocaml_is_mangled(const char *sym)
{
return 0 == strncmp(sym, caml_prefix, caml_prefix_len)
&& isupper(sym[caml_prefix_len]);
}
/*
* input:
* sym: a symbol which may have been mangled by the OCaml compiler
* return:
* if the input doesn't look like a mangled OCaml symbol, NULL is returned
* otherwise, a newly allocated string containing the demangled symbol is returned
*/
char *
ocaml_demangle_sym(const char *sym)
{
char *result;
int j = 0;
int i;
int len;
if (!ocaml_is_mangled(sym)) {
return NULL;
}
len = strlen(sym);
/* the demangled symbol is always smaller than the mangled symbol */
result = malloc(len + 1);
if (!result)
return NULL;
/* skip "caml" prefix */
i = caml_prefix_len;
while (i < len) {
if (sym[i] == '_' && sym[i + 1] == '_') {
/* "__" -> "." */
result[j++] = '.';
i += 2;
}
else if (sym[i] == '$' && isxdigit(sym[i + 1]) && isxdigit(sym[i + 2])) {
/* "$xx" is a hex-encoded character */
result[j++] = (hex(sym[i + 1]) << 4) | hex(sym[i + 2]);
i += 3;
}
else {
result[j++] = sym[i++];
}
}
result[j] = '\0';
return result;
}
| linux-master | tools/perf/util/demangle-ocaml.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <regex.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/libbpf.h>
#endif
#include <perf/cpumap.h>
#include <tools/libc_compat.h> // reallocarray
#include "dso.h"
#include "evlist.h"
#include "evsel.h"
#include "util/evsel_fprintf.h"
#include "header.h"
#include "memswap.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "pmus.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"
#include "util/util.h" // perf_exe()
#include "cputopo.h"
#include "bpf-event.h"
#include "bpf-utils.h"
#include "clockid.h"
#include <linux/ctype.h>
#include <internal/lib.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
/*
* magic2 = "PERFILE2"
* must be a numerical value to let the endianness
* determine the memory layout. That way we are able
* to detect endianness when reading the perf.data file
* back.
*
* we check for legacy (PERFFILE) format.
*/
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2 = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
#define PERF_MAGIC __perf_magic2
const char perf_version_string[] = PERF_VERSION;
struct perf_file_attr {
struct perf_event_attr attr;
struct perf_file_section ids;
};
void perf_header__set_feat(struct perf_header *header, int feat)
{
__set_bit(feat, header->adds_features);
}
void perf_header__clear_feat(struct perf_header *header, int feat)
{
__clear_bit(feat, header->adds_features);
}
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
return test_bit(feat, header->adds_features);
}
static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
ssize_t ret = writen(ff->fd, buf, size);
if (ret != (ssize_t)size)
return ret < 0 ? (int)ret : -1;
return 0;
}
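/*
* Pipe-mode variant of do_write(): append to the in-memory feature buffer,
* doubling its allocation as needed, but never let a feature grow past what
* fits in the u16 perf_event_header::size (0xffff minus the header).
*/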
static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
/* struct perf_event_header::size is u16 */
const size_t max_size = 0xffff - sizeof(struct perf_event_header);
size_t new_size = ff->size;
void *addr;
if (size + ff->offset > max_size)
return -E2BIG;
while (size > (new_size - ff->offset))
new_size <<= 1;
new_size = min(max_size, new_size);
if (ff->size < new_size) {
addr = realloc(ff->buf, new_size);
if (!addr)
return -ENOMEM;
ff->buf = addr;
ff->size = new_size;
}
memcpy(ff->buf + ff->offset, buf, size);
ff->offset += size;
return 0;
}
/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
if (!ff->buf)
return __do_write_fd(ff, buf, size);
return __do_write_buf(ff, buf, size);
}
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
u64 *p = (u64 *) set;
int i, ret;
ret = do_write(ff, &size, sizeof(size));
if (ret < 0)
return ret;
for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
ret = do_write(ff, p + i, sizeof(*p));
if (ret < 0)
return ret;
}
return 0;
}
/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
size_t count, size_t count_aligned)
{
static const char zero_buf[NAME_ALIGN];
int err = do_write(ff, bf, count);
if (!err)
err = do_write(ff, zero_buf, count_aligned - count);
return err;
}
#define string_size(str) \
(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
u32 len, olen;
int ret;
olen = strlen(str) + 1;
len = PERF_ALIGN(olen, NAME_ALIGN);
/* write len, incl. \0 */
ret = do_write(ff, &len, sizeof(len));
if (ret < 0)
return ret;
return write_padded(ff, str, olen, len);
}
static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
ssize_t ret = readn(ff->fd, addr, size);
if (ret != size)
return ret < 0 ? (int)ret : -1;
return 0;
}
static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
if (size > (ssize_t)ff->size - ff->offset)
return -1;
memcpy(addr, ff->buf + ff->offset, size);
ff->offset += size;
return 0;
}
static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
if (!ff->buf)
return __do_read_fd(ff, addr, size);
return __do_read_buf(ff, addr, size);
}
static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
int ret;
ret = __do_read(ff, addr, sizeof(*addr));
if (ret)
return ret;
if (ff->ph->needs_swap)
*addr = bswap_32(*addr);
return 0;
}
static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
int ret;
ret = __do_read(ff, addr, sizeof(*addr));
if (ret)
return ret;
if (ff->ph->needs_swap)
*addr = bswap_64(*addr);
return 0;
}
static char *do_read_string(struct feat_fd *ff)
{
u32 len;
char *buf;
if (do_read_u32(ff, &len))
return NULL;
buf = malloc(len);
if (!buf)
return NULL;
if (!__do_read(ff, buf, len)) {
/*
* strings are padded by zeroes
* thus the actual strlen of buf
* may be less than len
*/
return buf;
}
free(buf);
return NULL;
}
/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
unsigned long *set;
u64 size, *p;
int i, ret;
ret = do_read_u64(ff, &size);
if (ret)
return ret;
set = bitmap_zalloc(size);
if (!set)
return -ENOMEM;
p = (u64 *) set;
for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
ret = do_read_u64(ff, p + i);
if (ret < 0) {
free(set);
return ret;
}
}
*pset = set;
*psize = size;
return 0;
}
#ifdef HAVE_LIBTRACEEVENT
static int write_tracing_data(struct feat_fd *ff,
struct evlist *evlist)
{
if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
return -1;
return read_tracing_data(ff->fd, &evlist->core.entries);
}
#endif
static int write_build_id(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct perf_session *session;
int err;
session = container_of(ff->ph, struct perf_session, header);
if (!perf_session__read_build_ids(session, true))
return -1;
if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
return -1;
err = perf_session__write_buildid_table(session, ff);
if (err < 0) {
pr_debug("failed to write buildid table\n");
return err;
}
perf_session__cache_build_ids(session);
return 0;
}
static int write_hostname(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct utsname uts;
int ret;
ret = uname(&uts);
if (ret < 0)
return -1;
return do_write_string(ff, uts.nodename);
}
static int write_osrelease(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct utsname uts;
int ret;
ret = uname(&uts);
if (ret < 0)
return -1;
return do_write_string(ff, uts.release);
}
static int write_arch(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct utsname uts;
int ret;
ret = uname(&uts);
if (ret < 0)
return -1;
return do_write_string(ff, uts.machine);
}
static int write_version(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
return do_write_string(ff, perf_version_string);
}
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
FILE *file;
char *buf = NULL;
char *s, *p;
const char *search = cpuinfo_proc;
size_t len = 0;
int ret = -1;
if (!search)
return -1;
file = fopen("/proc/cpuinfo", "r");
if (!file)
return -1;
while (getline(&buf, &len, file) > 0) {
ret = strncmp(buf, search, strlen(search));
if (!ret)
break;
}
if (ret) {
ret = -1;
goto done;
}
s = buf;
p = strchr(buf, ':');
if (p && *(p+1) == ' ' && *(p+2))
s = p + 2;
p = strchr(s, '\n');
if (p)
*p = '\0';
/* squash extra space characters (branding string) */
p = s;
while (*p) {
if (isspace(*p)) {
char *r = p + 1;
char *q = skip_spaces(r);
*p = ' ';
if (q != (p+1))
while ((*r++ = *q++));
}
p++;
}
ret = do_write_string(ff, s);
done:
free(buf);
fclose(file);
return ret;
}
static int write_cpudesc(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
#define CPUINFO_PROC { "cpu", }
#elif defined(__s390__)
#define CPUINFO_PROC { "vendor_id", }
#elif defined(__sh__)
#define CPUINFO_PROC { "cpu type", }
#elif defined(__alpha__) || defined(__mips__)
#define CPUINFO_PROC { "cpu model", }
#elif defined(__arm__)
#define CPUINFO_PROC { "model name", "Processor", }
#elif defined(__arc__)
#define CPUINFO_PROC { "Processor", }
#elif defined(__xtensa__)
#define CPUINFO_PROC { "core ID", }
#elif defined(__loongarch__)
#define CPUINFO_PROC { "Model Name", }
#else
#define CPUINFO_PROC { "model name", }
#endif
const char *cpuinfo_procs[] = CPUINFO_PROC;
#undef CPUINFO_PROC
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
int ret;
ret = __write_cpudesc(ff, cpuinfo_procs[i]);
if (ret >= 0)
return ret;
}
return -1;
}
static int write_nrcpus(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
long nr;
u32 nrc, nra;
int ret;
nrc = cpu__max_present_cpu().cpu;
nr = sysconf(_SC_NPROCESSORS_ONLN);
if (nr < 0)
return -1;
nra = (u32)(nr & UINT_MAX);
ret = do_write(ff, &nrc, sizeof(nrc));
if (ret < 0)
return ret;
return do_write(ff, &nra, sizeof(nra));
}
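/*
* File format:
*
* u32 nr_events;
* u32 attr_size;
* struct {
* struct perf_event_attr attr;
* u32 nr_ids;
* char name[]; (padded event name string)
* u64 ids[nr_ids];
* } events[nr_events];
*/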
static int write_event_desc(struct feat_fd *ff,
struct evlist *evlist)
{
struct evsel *evsel;
u32 nre, nri, sz;
int ret;
nre = evlist->core.nr_entries;
/*
* write number of events
*/
ret = do_write(ff, &nre, sizeof(nre));
if (ret < 0)
return ret;
/*
* size of perf_event_attr struct
*/
sz = (u32)sizeof(evsel->core.attr);
ret = do_write(ff, &sz, sizeof(sz));
if (ret < 0)
return ret;
evlist__for_each_entry(evlist, evsel) {
ret = do_write(ff, &evsel->core.attr, sz);
if (ret < 0)
return ret;
/*
* write the number of unique ids per event
* there is one id per instance of an event
*
* copy into nri to be independent of the
* type used for the ids
*/
nri = evsel->core.ids;
ret = do_write(ff, &nri, sizeof(nri));
if (ret < 0)
return ret;
/*
* write event string as passed on cmdline
*/
ret = do_write_string(ff, evsel__name(evsel));
if (ret < 0)
return ret;
/*
* write unique ids for this event
*/
ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
if (ret < 0)
return ret;
}
return 0;
}
static int write_cmdline(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
char pbuf[MAXPATHLEN], *buf;
int i, ret, n;
/* actual path to perf binary */
buf = perf_exe(pbuf, MAXPATHLEN);
/* account for binary path */
n = perf_env.nr_cmdline + 1;
ret = do_write(ff, &n, sizeof(n));
if (ret < 0)
return ret;
ret = do_write_string(ff, buf);
if (ret < 0)
return ret;
for (i = 0 ; i < perf_env.nr_cmdline; i++) {
ret = do_write_string(ff, perf_env.cmdline_argv[i]);
if (ret < 0)
return ret;
}
return 0;
}
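/*
* CPU_TOPOLOGY layout: counts and sibling CPU list strings for packages
* and cores, then per-CPU core and socket ids, and, when die topology is
* available, the die sibling lists followed by per-CPU die ids.
*/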
static int write_cpu_topology(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct cpu_topology *tp;
u32 i;
int ret, j;
tp = cpu_topology__new();
if (!tp)
return -1;
ret = do_write(ff, &tp->package_cpus_lists, sizeof(tp->package_cpus_lists));
if (ret < 0)
goto done;
for (i = 0; i < tp->package_cpus_lists; i++) {
ret = do_write_string(ff, tp->package_cpus_list[i]);
if (ret < 0)
goto done;
}
ret = do_write(ff, &tp->core_cpus_lists, sizeof(tp->core_cpus_lists));
if (ret < 0)
goto done;
for (i = 0; i < tp->core_cpus_lists; i++) {
ret = do_write_string(ff, tp->core_cpus_list[i]);
if (ret < 0)
break;
}
ret = perf_env__read_cpu_topology_map(&perf_env);
if (ret < 0)
goto done;
for (j = 0; j < perf_env.nr_cpus_avail; j++) {
ret = do_write(ff, &perf_env.cpu[j].core_id,
sizeof(perf_env.cpu[j].core_id));
if (ret < 0)
return ret;
ret = do_write(ff, &perf_env.cpu[j].socket_id,
sizeof(perf_env.cpu[j].socket_id));
if (ret < 0)
return ret;
}
if (!tp->die_cpus_lists)
goto done;
ret = do_write(ff, &tp->die_cpus_lists, sizeof(tp->die_cpus_lists));
if (ret < 0)
goto done;
for (i = 0; i < tp->die_cpus_lists; i++) {
ret = do_write_string(ff, tp->die_cpus_list[i]);
if (ret < 0)
goto done;
}
for (j = 0; j < perf_env.nr_cpus_avail; j++) {
ret = do_write(ff, &perf_env.cpu[j].die_id,
sizeof(perf_env.cpu[j].die_id));
if (ret < 0)
return ret;
}
done:
cpu_topology__delete(tp);
return ret;
}
static int write_total_mem(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
char *buf = NULL;
FILE *fp;
size_t len = 0;
int ret = -1, n;
uint64_t mem;
fp = fopen("/proc/meminfo", "r");
if (!fp)
return -1;
while (getline(&buf, &len, fp) > 0) {
ret = strncmp(buf, "MemTotal:", 9);
if (!ret)
break;
}
if (!ret) {
n = sscanf(buf, "%*s %"PRIu64, &mem);
if (n == 1)
ret = do_write(ff, &mem, sizeof(mem));
} else
ret = -1;
free(buf);
fclose(fp);
return ret;
}
static int write_numa_topology(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct numa_topology *tp;
int ret = -1;
u32 i;
tp = numa_topology__new();
if (!tp)
return -ENOMEM;
ret = do_write(ff, &tp->nr, sizeof(u32));
if (ret < 0)
goto err;
for (i = 0; i < tp->nr; i++) {
struct numa_topology_node *n = &tp->nodes[i];
ret = do_write(ff, &n->node, sizeof(u32));
if (ret < 0)
goto err;
ret = do_write(ff, &n->mem_total, sizeof(u64));
if (ret)
goto err;
ret = do_write(ff, &n->mem_free, sizeof(u64));
if (ret)
goto err;
ret = do_write_string(ff, n->cpus);
if (ret < 0)
goto err;
}
ret = 0;
err:
numa_topology__delete(tp);
return ret;
}
/*
* File format:
*
* struct pmu_mappings {
* u32 pmu_num;
* struct pmu_map {
* u32 type;
* char name[];
* }[pmu_num];
* };
*/
static int write_pmu_mappings(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct perf_pmu *pmu = NULL;
u32 pmu_num = 0;
int ret;
/*
* Do a first pass to count the number of PMUs to avoid an lseek, so this
* works in pipe mode as well.
*/
while ((pmu = perf_pmus__scan(pmu)))
pmu_num++;
ret = do_write(ff, &pmu_num, sizeof(pmu_num));
if (ret < 0)
return ret;
while ((pmu = perf_pmus__scan(pmu))) {
ret = do_write(ff, &pmu->type, sizeof(pmu->type));
if (ret < 0)
return ret;
ret = do_write_string(ff, pmu->name);
if (ret < 0)
return ret;
}
return 0;
}
/*
* File format:
*
* struct group_descs {
* u32 nr_groups;
* struct group_desc {
* char name[];
* u32 leader_idx;
* u32 nr_members;
* }[nr_groups];
* };
*/
static int write_group_desc(struct feat_fd *ff,
struct evlist *evlist)
{
u32 nr_groups = evlist__nr_groups(evlist);
struct evsel *evsel;
int ret;
ret = do_write(ff, &nr_groups, sizeof(nr_groups));
if (ret < 0)
return ret;
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
const char *name = evsel->group_name ?: "{anon_group}";
u32 leader_idx = evsel->core.idx;
u32 nr_members = evsel->core.nr_members;
ret = do_write_string(ff, name);
if (ret < 0)
return ret;
ret = do_write(ff, &leader_idx, sizeof(leader_idx));
if (ret < 0)
return ret;
ret = do_write(ff, &nr_members, sizeof(nr_members));
if (ret < 0)
return ret;
}
}
return 0;
}
/*
* Return the CPU id as a raw string.
*
* Each architecture should provide a more precise id string that
* can be used to match the architecture's "mapfile".
*/
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
return NULL;
}
/* Return zero when the cpuid from the mapfile.csv matches the
* cpuid string generated on this platform.
* Otherwise return non-zero.
*/
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
regex_t re;
regmatch_t pmatch[1];
int match;
if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
/* Warn that we were unable to build a matcher for this particular string. */
pr_info("Invalid regular expression %s\n", mapcpuid);
return 1;
}
match = !regexec(&re, cpuid, 1, pmatch, 0);
regfree(&re);
if (match) {
size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
/* Verify the entire string matched. */
if (match_len == strlen(cpuid))
return 0;
}
return 1;
}
/*
* default get_cpuid(): nothing gets recorded
* actual implementation must be in arch/$(SRCARCH)/util/header.c
*/
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
return ENOSYS; /* Not implemented */
}
static int write_cpuid(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
char buffer[64];
int ret;
ret = get_cpuid(buffer, sizeof(buffer));
if (ret)
return -1;
return do_write_string(ff, buffer);
}
static int write_branch_stack(struct feat_fd *ff __maybe_unused,
struct evlist *evlist __maybe_unused)
{
return 0;
}
static int write_auxtrace(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct perf_session *session;
int err;
if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
return -1;
session = container_of(ff->ph, struct perf_session, header);
err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
if (err < 0)
pr_err("Failed to write auxtrace index\n");
return err;
}
static int write_clockid(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
return do_write(ff, &ff->ph->env.clock.clockid_res_ns,
sizeof(ff->ph->env.clock.clockid_res_ns));
}
static int write_clock_data(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
u64 *data64;
u32 data32;
int ret;
/* version */
data32 = 1;
ret = do_write(ff, &data32, sizeof(data32));
if (ret < 0)
return ret;
/* clockid */
data32 = ff->ph->env.clock.clockid;
ret = do_write(ff, &data32, sizeof(data32));
if (ret < 0)
return ret;
/* TOD ref time */
data64 = &ff->ph->env.clock.tod_ns;
ret = do_write(ff, data64, sizeof(*data64));
if (ret < 0)
return ret;
/* clockid ref time */
data64 = &ff->ph->env.clock.clockid_ns;
return do_write(ff, data64, sizeof(*data64));
}
static int write_hybrid_topology(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct hybrid_topology *tp;
int ret;
u32 i;
tp = hybrid_topology__new();
if (!tp)
return -ENOENT;
ret = do_write(ff, &tp->nr, sizeof(u32));
if (ret < 0)
goto err;
for (i = 0; i < tp->nr; i++) {
struct hybrid_topology_node *n = &tp->nodes[i];
ret = do_write_string(ff, n->pmu_name);
if (ret < 0)
goto err;
ret = do_write_string(ff, n->cpus);
if (ret < 0)
goto err;
}
ret = 0;
err:
hybrid_topology__delete(tp);
return ret;
}
static int write_dir_format(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct perf_session *session;
struct perf_data *data;
session = container_of(ff->ph, struct perf_session, header);
data = session->data;
if (WARN_ON(!perf_data__is_dir(data)))
return -1;
return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}
/*
* Check whether a CPU is online
*
* Returns:
* 1 -> if CPU is online
* 0 -> if CPU is offline
* -1 -> error case
*/
int is_cpu_online(unsigned int cpu)
{
char *str;
size_t strlen;
char buf[256];
int status = -1;
struct stat statbuf;
snprintf(buf, sizeof(buf),
"/sys/devices/system/cpu/cpu%d", cpu);
if (stat(buf, &statbuf) != 0)
return 0;
/*
* Check if /sys/devices/system/cpu/cpux/online file
* exists. In some cases cpu0 won't have an online file since
* it is generally not expected to be turned off.
* In kernels without CONFIG_HOTPLUG_CPU, this
* file won't exist.
*/
snprintf(buf, sizeof(buf),
"/sys/devices/system/cpu/cpu%d/online", cpu);
if (stat(buf, &statbuf) != 0)
return 1;
/*
* Read online file using sysfs__read_str.
* If read or open fails, return -1.
* If read succeeds, return value from file
* which gets stored in "str"
*/
snprintf(buf, sizeof(buf),
"devices/system/cpu/cpu%d/online", cpu);
if (sysfs__read_str(buf, &str, &strlen) < 0)
return status;
status = atoi(str);
free(str);
return status;
}
#ifdef HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
int ret;
down_read(&env->bpf_progs.lock);
ret = do_write(ff, &env->bpf_progs.infos_cnt,
sizeof(env->bpf_progs.infos_cnt));
if (ret < 0)
goto out;
root = &env->bpf_progs.infos;
next = rb_first(root);
while (next) {
struct bpf_prog_info_node *node;
size_t len;
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);
len = sizeof(struct perf_bpil) +
node->info_linear->data_len;
/* before writing to file, translate address to offset */
bpil_addr_to_offs(node->info_linear);
ret = do_write(ff, node->info_linear, len);
/*
* translate back to address even when do_write() fails,
* so that this function never changes the data.
*/
bpil_offs_to_addr(node->info_linear);
if (ret < 0)
goto out;
}
out:
up_read(&env->bpf_progs.lock);
return ret;
}
static int write_bpf_btf(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
int ret;
down_read(&env->bpf_progs.lock);
ret = do_write(ff, &env->bpf_progs.btfs_cnt,
sizeof(env->bpf_progs.btfs_cnt));
if (ret < 0)
goto out;
root = &env->bpf_progs.btfs;
next = rb_first(root);
while (next) {
struct btf_node *node;
node = rb_entry(next, struct btf_node, rb_node);
next = rb_next(&node->rb_node);
ret = do_write(ff, &node->id,
sizeof(u32) * 2 + node->data_size);
if (ret < 0)
goto out;
}
out:
up_read(&env->bpf_progs.lock);
return ret;
}
#endif // HAVE_LIBBPF_SUPPORT
static int cpu_cache_level__sort(const void *a, const void *b)
{
struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
return cache_a->level - cache_b->level;
}
static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
if (a->level != b->level)
return false;
if (a->line_size != b->line_size)
return false;
if (a->sets != b->sets)
return false;
if (a->ways != b->ways)
return false;
if (strcmp(a->type, b->type))
return false;
if (strcmp(a->size, b->size))
return false;
if (strcmp(a->map, b->map))
return false;
return true;
}
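/*
* Read one cache index directory (cache/index<level>) from sysfs for the
* given CPU. Returns 1 if the index directory does not exist, -1 on a read
* error and 0 on success with the type/size/map strings allocated.
*/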
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
char path[PATH_MAX], file[PATH_MAX];
struct stat st;
size_t len;
scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
if (stat(file, &st))
return 1;
scnprintf(file, PATH_MAX, "%s/level", path);
if (sysfs__read_int(file, (int *) &cache->level))
return -1;
scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
if (sysfs__read_int(file, (int *) &cache->line_size))
return -1;
scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
if (sysfs__read_int(file, (int *) &cache->sets))
return -1;
scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
if (sysfs__read_int(file, (int *) &cache->ways))
return -1;
scnprintf(file, PATH_MAX, "%s/type", path);
if (sysfs__read_str(file, &cache->type, &len))
return -1;
cache->type[len] = 0;
cache->type = strim(cache->type);
scnprintf(file, PATH_MAX, "%s/size", path);
if (sysfs__read_str(file, &cache->size, &len)) {
zfree(&cache->type);
return -1;
}
cache->size[len] = 0;
cache->size = strim(cache->size);
scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
if (sysfs__read_str(file, &cache->map, &len)) {
zfree(&cache->size);
zfree(&cache->type);
return -1;
}
cache->map[len] = 0;
cache->map = strim(cache->map);
return 0;
}
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
/*
* Build caches levels for a particular CPU from the data in
* /sys/devices/system/cpu/cpu<cpu>/cache/
* The cache level data is stored in caches[] from index at
* *cntp.
*/
int build_caches_for_cpu(u32 cpu, struct cpu_cache_level caches[], u32 *cntp)
{
u16 level;
for (level = 0; level < MAX_CACHE_LVL; level++) {
struct cpu_cache_level c;
int err;
u32 i;
err = cpu_cache_level__read(&c, cpu, level);
if (err < 0)
return err;
if (err == 1)
break;
for (i = 0; i < *cntp; i++) {
if (cpu_cache_level__cmp(&c, &caches[i]))
break;
}
if (i == *cntp) {
caches[*cntp] = c;
*cntp = *cntp + 1;
} else
cpu_cache_level__free(&c);
}
return 0;
}
static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
{
u32 nr, cpu, cnt = 0;
nr = cpu__max_cpu().cpu;
for (cpu = 0; cpu < nr; cpu++) {
int ret = build_caches_for_cpu(cpu, caches, &cnt);
if (ret)
return ret;
}
*cntp = cnt;
return 0;
}
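/*
* CACHE layout: u32 version (1), u32 count, then for every unique cache
* the u32 fields level/line_size/sets/ways followed by the type/size/map
* strings.
*/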
static int write_cache(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
u32 max_caches = cpu__max_cpu().cpu * MAX_CACHE_LVL;
struct cpu_cache_level caches[max_caches];
u32 cnt = 0, i, version = 1;
int ret;
ret = build_caches(caches, &cnt);
if (ret)
goto out;
qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
ret = do_write(ff, &version, sizeof(u32));
if (ret < 0)
goto out;
ret = do_write(ff, &cnt, sizeof(u32));
if (ret < 0)
goto out;
for (i = 0; i < cnt; i++) {
struct cpu_cache_level *c = &caches[i];
#define _W(v) \
ret = do_write(ff, &c->v, sizeof(u32)); \
if (ret < 0) \
goto out;
_W(level)
_W(line_size)
_W(sets)
_W(ways)
#undef _W
#define _W(v) \
ret = do_write_string(ff, (const char *) c->v); \
if (ret < 0) \
goto out;
_W(type)
_W(size)
_W(map)
#undef _W
}
out:
for (i = 0; i < cnt; i++)
cpu_cache_level__free(&caches[i]);
return ret;
}
static int write_stat(struct feat_fd *ff __maybe_unused,
struct evlist *evlist __maybe_unused)
{
return 0;
}
static int write_sample_time(struct feat_fd *ff,
struct evlist *evlist)
{
int ret;
ret = do_write(ff, &evlist->first_sample_time,
sizeof(evlist->first_sample_time));
if (ret < 0)
return ret;
return do_write(ff, &evlist->last_sample_time,
sizeof(evlist->last_sample_time));
}
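/*
* Build the physical memory block bitmap for one node by scanning
* /sys/devices/system/node/node<idx> for memory<N> entries.
*/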
static int memory_node__read(struct memory_node *n, unsigned long idx)
{
unsigned int phys, size = 0;
char path[PATH_MAX];
struct dirent *ent;
DIR *dir;
#define for_each_memory(mem, dir) \
while ((ent = readdir(dir))) \
if (strcmp(ent->d_name, ".") && \
strcmp(ent->d_name, "..") && \
sscanf(ent->d_name, "memory%u", &mem) == 1)
scnprintf(path, PATH_MAX,
"%s/devices/system/node/node%lu",
sysfs__mountpoint(), idx);
dir = opendir(path);
if (!dir) {
pr_warning("failed: can't open memory sysfs data\n");
return -1;
}
for_each_memory(phys, dir) {
size = max(phys, size);
}
size++;
n->set = bitmap_zalloc(size);
if (!n->set) {
closedir(dir);
return -ENOMEM;
}
n->node = idx;
n->size = size;
rewinddir(dir);
for_each_memory(phys, dir) {
__set_bit(phys, n->set);
}
closedir(dir);
return 0;
}
static void memory_node__delete_nodes(struct memory_node *nodesp, u64 cnt)
{
for (u64 i = 0; i < cnt; i++)
bitmap_free(nodesp[i].set);
free(nodesp);
}
static int memory_node__sort(const void *a, const void *b)
{
const struct memory_node *na = a;
const struct memory_node *nb = b;
return na->node - nb->node;
}
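/*
* Enumerate /sys/devices/system/node/node* directories and read each
* node's memory block bitmap. On success the nodes are returned sorted
* by node id.
*/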
static int build_mem_topology(struct memory_node **nodesp, u64 *cntp)
{
char path[PATH_MAX];
struct dirent *ent;
DIR *dir;
int ret = 0;
size_t cnt = 0, size = 0;
struct memory_node *nodes = NULL;
scnprintf(path, PATH_MAX, "%s/devices/system/node/",
sysfs__mountpoint());
dir = opendir(path);
if (!dir) {
pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
__func__, path);
return -1;
}
while (!ret && (ent = readdir(dir))) {
unsigned int idx;
int r;
if (!strcmp(ent->d_name, ".") ||
!strcmp(ent->d_name, ".."))
continue;
r = sscanf(ent->d_name, "node%u", &idx);
if (r != 1)
continue;
if (cnt >= size) {
struct memory_node *new_nodes =
reallocarray(nodes, cnt + 4, sizeof(*nodes));
if (!new_nodes) {
pr_err("Failed to write MEM_TOPOLOGY, size %zd nodes\n", size);
ret = -ENOMEM;
goto out;
}
nodes = new_nodes;
size += 4;
}
ret = memory_node__read(&nodes[cnt++], idx);
}
out:
closedir(dir);
if (!ret) {
*cntp = cnt;
*nodesp = nodes;
qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
} else
memory_node__delete_nodes(nodes, cnt);
return ret;
}
/*
* The MEM_TOPOLOGY holds physical memory map for every
* node in system. The format of data is as follows:
*
* 0 - version | for future changes
* 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
* 16 - count | number of nodes
*
* For each node we store map of physical indexes for
* each node:
*
* 32 - node id | node index
* 40 - size | size of bitmap
* 48 - bitmap | bitmap of memory indexes that belongs to node
*/
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
struct evlist *evlist __maybe_unused)
{
struct memory_node *nodes = NULL;
u64 bsize, version = 1, i, nr = 0;
int ret;
ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
(unsigned long long *) &bsize);
if (ret)
return ret;
ret = build_mem_topology(&nodes, &nr);
if (ret)
return ret;
ret = do_write(ff, &version, sizeof(version));
if (ret < 0)
goto out;
ret = do_write(ff, &bsize, sizeof(bsize));
if (ret < 0)
goto out;
ret = do_write(ff, &nr, sizeof(nr));
if (ret < 0)
goto out;
for (i = 0; i < nr; i++) {
struct memory_node *n = &nodes[i];
#define _W(v) \
ret = do_write(ff, &n->v, sizeof(n->v)); \
if (ret < 0) \
goto out;
_W(node)
_W(size)
#undef _W
ret = do_write_bitmap(ff, n->set, n->size);
if (ret < 0)
goto out;
}
out:
memory_node__delete_nodes(nodes, nr);
return ret;
}
static int write_compressed(struct feat_fd *ff __maybe_unused,
struct evlist *evlist __maybe_unused)
{
int ret;
ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
if (ret)
return ret;
ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
if (ret)
return ret;
ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
if (ret)
return ret;
ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
if (ret)
return ret;
return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}
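/*
* Write one PMU's capabilities: u32 nr_caps followed by name/value string
* pairs, optionally followed by the PMU name (used by HEADER_PMU_CAPS).
*/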
static int __write_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu,
bool write_pmu)
{
struct perf_pmu_caps *caps = NULL;
int ret;
ret = do_write(ff, &pmu->nr_caps, sizeof(pmu->nr_caps));
if (ret < 0)
return ret;
list_for_each_entry(caps, &pmu->caps, list) {
ret = do_write_string(ff, caps->name);
if (ret < 0)
return ret;
ret = do_write_string(ff, caps->value);
if (ret < 0)
return ret;
}
if (write_pmu) {
ret = do_write_string(ff, pmu->name);
if (ret < 0)
return ret;
}
return ret;
}
static int write_cpu_pmu_caps(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct perf_pmu *cpu_pmu = perf_pmus__find("cpu");
int ret;
if (!cpu_pmu)
return -ENOENT;
ret = perf_pmu__caps_parse(cpu_pmu);
if (ret < 0)
return ret;
return __write_pmu_caps(ff, cpu_pmu, false);
}
static int write_pmu_caps(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct perf_pmu *pmu = NULL;
int nr_pmu = 0;
int ret;
while ((pmu = perf_pmus__scan(pmu))) {
if (!strcmp(pmu->name, "cpu")) {
/*
* The "cpu" PMU is special and covered by
* HEADER_CPU_PMU_CAPS. Note, core PMUs are
* counted/written here for ARM, s390 and Intel hybrid.
*/
continue;
}
if (perf_pmu__caps_parse(pmu) <= 0)
continue;
nr_pmu++;
}
ret = do_write(ff, &nr_pmu, sizeof(nr_pmu));
if (ret < 0)
return ret;
if (!nr_pmu)
return 0;
/*
* Note that older perf tools assume core PMUs come first; this is a
* property of perf_pmus__scan.
*/
pmu = NULL;
while ((pmu = perf_pmus__scan(pmu))) {
if (!strcmp(pmu->name, "cpu")) {
/* Skip as above. */
continue;
}
if (perf_pmu__caps_parse(pmu) <= 0)
continue;
ret = __write_pmu_caps(ff, pmu, true);
if (ret < 0)
return ret;
}
return 0;
}
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}
static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}
static void print_arch(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}
static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}
static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}
static void print_version(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}
static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
int nr, i;
nr = ff->ph->env.nr_cmdline;
fprintf(fp, "# cmdline : ");
for (i = 0; i < nr; i++) {
char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
if (!argv_i) {
fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
} else {
char *mem = argv_i;
do {
char *quote = strchr(argv_i, '\'');
if (!quote)
break;
*quote++ = '\0';
fprintf(fp, "%s\\\'", argv_i);
argv_i = quote;
} while (1);
fprintf(fp, "%s ", argv_i);
free(mem);
}
}
fputc('\n', fp);
}
static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
struct perf_header *ph = ff->ph;
int cpu_nr = ph->env.nr_cpus_avail;
int nr, i;
char *str;
nr = ph->env.nr_sibling_cores;
str = ph->env.sibling_cores;
for (i = 0; i < nr; i++) {
fprintf(fp, "# sibling sockets : %s\n", str);
str += strlen(str) + 1;
}
if (ph->env.nr_sibling_dies) {
nr = ph->env.nr_sibling_dies;
str = ph->env.sibling_dies;
for (i = 0; i < nr; i++) {
fprintf(fp, "# sibling dies : %s\n", str);
str += strlen(str) + 1;
}
}
nr = ph->env.nr_sibling_threads;
str = ph->env.sibling_threads;
for (i = 0; i < nr; i++) {
fprintf(fp, "# sibling threads : %s\n", str);
str += strlen(str) + 1;
}
if (ph->env.nr_sibling_dies) {
if (ph->env.cpu != NULL) {
for (i = 0; i < cpu_nr; i++)
fprintf(fp, "# CPU %d: Core ID %d, "
"Die ID %d, Socket ID %d\n",
i, ph->env.cpu[i].core_id,
ph->env.cpu[i].die_id,
ph->env.cpu[i].socket_id);
} else
fprintf(fp, "# Core ID, Die ID and Socket ID "
"information is not available\n");
} else {
if (ph->env.cpu != NULL) {
for (i = 0; i < cpu_nr; i++)
fprintf(fp, "# CPU %d: Core ID %d, "
"Socket ID %d\n",
i, ph->env.cpu[i].core_id,
ph->env.cpu[i].socket_id);
} else
fprintf(fp, "# Core ID and Socket ID "
"information is not available\n");
}
}
static void print_clockid(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
ff->ph->env.clock.clockid_res_ns * 1000);
}
static void print_clock_data(struct feat_fd *ff, FILE *fp)
{
struct timespec clockid_ns;
char tstr[64], date[64];
struct timeval tod_ns;
clockid_t clockid;
struct tm ltime;
u64 ref;
if (!ff->ph->env.clock.enabled) {
fprintf(fp, "# reference time disabled\n");
return;
}
/* Compute TOD time. */
ref = ff->ph->env.clock.tod_ns;
tod_ns.tv_sec = ref / NSEC_PER_SEC;
ref -= tod_ns.tv_sec * NSEC_PER_SEC;
tod_ns.tv_usec = ref / NSEC_PER_USEC;
/* Compute clockid time. */
ref = ff->ph->env.clock.clockid_ns;
clockid_ns.tv_sec = ref / NSEC_PER_SEC;
ref -= clockid_ns.tv_sec * NSEC_PER_SEC;
clockid_ns.tv_nsec = ref;
clockid = ff->ph->env.clock.clockid;
if (localtime_r(&tod_ns.tv_sec, <ime) == NULL)
snprintf(tstr, sizeof(tstr), "<error>");
else {
strftime(date, sizeof(date), "%F %T", <ime);
scnprintf(tstr, sizeof(tstr), "%s.%06d",
date, (int) tod_ns.tv_usec);
}
fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid);
fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n",
tstr, (long) tod_ns.tv_sec, (int) tod_ns.tv_usec,
(long) clockid_ns.tv_sec, clockid_ns.tv_nsec,
clockid_name(clockid));
}
static void print_hybrid_topology(struct feat_fd *ff, FILE *fp)
{
int i;
struct hybrid_node *n;
fprintf(fp, "# hybrid cpu system:\n");
for (i = 0; i < ff->ph->env.nr_hybrid_nodes; i++) {
n = &ff->ph->env.hybrid_nodes[i];
fprintf(fp, "# %s cpu list : %s\n", n->pmu_name, n->cpus);
}
}
static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
struct perf_session *session;
struct perf_data *data;
session = container_of(ff->ph, struct perf_session, header);
data = session->data;
fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}
#ifdef HAVE_LIBBPF_SUPPORT
static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
down_read(&env->bpf_progs.lock);
root = &env->bpf_progs.infos;
next = rb_first(root);
while (next) {
struct bpf_prog_info_node *node;
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);
bpf_event__print_bpf_prog_info(&node->info_linear->info,
env, fp);
}
up_read(&env->bpf_progs.lock);
}
static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
down_read(&env->bpf_progs.lock);
root = &env->bpf_progs.btfs;
next = rb_first(root);
while (next) {
struct btf_node *node;
node = rb_entry(next, struct btf_node, rb_node);
next = rb_next(&node->rb_node);
fprintf(fp, "# btf info of id %u\n", node->id);
}
up_read(&env->bpf_progs.lock);
}
#endif // HAVE_LIBBPF_SUPPORT
static void free_event_desc(struct evsel *events)
{
struct evsel *evsel;
if (!events)
return;
for (evsel = events; evsel->core.attr.size; evsel++) {
zfree(&evsel->name);
zfree(&evsel->core.id);
}
free(events);
}
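/*
* Sanity check an attr read back from the header: reject attrs that carry
* reserved bits or sample/read/branch format bits this tool does not know
* about, which usually means the file was written by a newer perf.
*/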
static bool perf_attr_check(struct perf_event_attr *attr)
{
if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) {
pr_warning("Reserved bits are set unexpectedly. "
"Please update perf tool.\n");
return false;
}
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) {
pr_warning("Unknown sample type (0x%llx) is detected. "
"Please update perf tool.\n",
attr->sample_type);
return false;
}
if (attr->read_format & ~(PERF_FORMAT_MAX-1)) {
pr_warning("Unknown read format (0x%llx) is detected. "
"Please update perf tool.\n",
attr->read_format);
return false;
}
if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) &&
(attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) {
pr_warning("Unknown branch sample type (0x%llx) is detected. "
"Please update perf tool.\n",
attr->branch_sample_type);
return false;
}
return true;
}
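/*
* Parse the EVENT_DESC feature back into an array of evsels terminated by
* an entry with attr.size == 0, restoring event names and sample ids.
*/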
static struct evsel *read_event_desc(struct feat_fd *ff)
{
struct evsel *evsel, *events = NULL;
u64 *id;
void *buf = NULL;
u32 nre, sz, nr, i, j;
size_t msz;
/* number of events */
if (do_read_u32(ff, &nre))
goto error;
if (do_read_u32(ff, &sz))
goto error;
/* buffer to hold on file attr struct */
buf = malloc(sz);
if (!buf)
goto error;
/* the last event terminates with evsel->core.attr.size == 0: */
events = calloc(nre + 1, sizeof(*events));
if (!events)
goto error;
msz = sizeof(evsel->core.attr);
if (sz < msz)
msz = sz;
for (i = 0, evsel = events; i < nre; evsel++, i++) {
evsel->core.idx = i;
/*
* must read entire on-file attr struct to
* sync up with layout.
*/
if (__do_read(ff, buf, sz))
goto error;
if (ff->ph->needs_swap)
perf_event__attr_swap(buf);
memcpy(&evsel->core.attr, buf, msz);
if (!perf_attr_check(&evsel->core.attr))
goto error;
if (do_read_u32(ff, &nr))
goto error;
if (ff->ph->needs_swap)
evsel->needs_swap = true;
evsel->name = do_read_string(ff);
if (!evsel->name)
goto error;
if (!nr)
continue;
id = calloc(nr, sizeof(*id));
if (!id)
goto error;
evsel->core.ids = nr;
evsel->core.id = id;
for (j = 0 ; j < nr; j++) {
if (do_read_u64(ff, id))
goto error;
id++;
}
}
out:
free(buf);
return events;
error:
free_event_desc(events);
events = NULL;
goto out;
}
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
void *priv __maybe_unused)
{
return fprintf(fp, ", %s = %s", name, val);
}
static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
struct evsel *evsel, *events;
u32 j;
u64 *id;
if (ff->events)
events = ff->events;
else
events = read_event_desc(ff);
if (!events) {
fprintf(fp, "# event desc: not available or unable to read\n");
return;
}
for (evsel = events; evsel->core.attr.size; evsel++) {
fprintf(fp, "# event : name = %s, ", evsel->name);
if (evsel->core.ids) {
fprintf(fp, ", id = {");
for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
if (j)
fputc(',', fp);
fprintf(fp, " %"PRIu64, *id);
}
fprintf(fp, " }");
}
perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);
fputc('\n', fp);
}
free_event_desc(events);
ff->events = NULL;
}
static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}
static void print_numa_topology(struct feat_fd *ff, FILE *fp)
{
int i;
struct numa_node *n;
for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
n = &ff->ph->env.numa_nodes[i];
fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
" free = %"PRIu64" kB\n",
n->node, n->mem_total, n->mem_free);
fprintf(fp, "# node%u cpu list : ", n->node);
cpu_map__fprintf(n->map, fp);
}
}
static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}
static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
fprintf(fp, "# contains samples with branch stack\n");
}
static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}
static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
fprintf(fp, "# contains stat data\n");
}
static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
{
int i;
fprintf(fp, "# CPU cache info:\n");
for (i = 0; i < ff->ph->env.caches_cnt; i++) {
fprintf(fp, "# ");
cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
}
}
static void print_compressed(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}
static void __print_pmu_caps(FILE *fp, int nr_caps, char **caps, char *pmu_name)
{
const char *delimiter = "";
int i;
if (!nr_caps) {
fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
return;
}
fprintf(fp, "# %s pmu capabilities: ", pmu_name);
for (i = 0; i < nr_caps; i++) {
fprintf(fp, "%s%s", delimiter, caps[i]);
delimiter = ", ";
}
fprintf(fp, "\n");
}
static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
{
__print_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps,
ff->ph->env.cpu_pmu_caps, (char *)"cpu");
}
static void print_pmu_caps(struct feat_fd *ff, FILE *fp)
{
struct pmu_caps *pmu_caps;
for (int i = 0; i < ff->ph->env.nr_pmus_with_caps; i++) {
pmu_caps = &ff->ph->env.pmu_caps[i];
__print_pmu_caps(fp, pmu_caps->nr_caps, pmu_caps->caps,
pmu_caps->pmu_name);
}
}
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
const char *delimiter = "# pmu mappings: ";
char *str, *tmp;
u32 pmu_num;
u32 type;
pmu_num = ff->ph->env.nr_pmu_mappings;
if (!pmu_num) {
fprintf(fp, "# pmu mappings: not available\n");
return;
}
str = ff->ph->env.pmu_mappings;
while (pmu_num) {
type = strtoul(str, &tmp, 0);
if (*tmp != ':')
goto error;
str = tmp + 1;
fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
delimiter = ", ";
str += strlen(str) + 1;
pmu_num--;
}
fprintf(fp, "\n");
if (!pmu_num)
return;
error:
fprintf(fp, "# pmu mappings: unable to read\n");
}
static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
struct perf_session *session;
struct evsel *evsel;
u32 nr = 0;
session = container_of(ff->ph, struct perf_session, header);
evlist__for_each_entry(session->evlist, evsel) {
if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", evsel__name(evsel));
nr = evsel->core.nr_members - 1;
} else if (nr) {
fprintf(fp, ",%s", evsel__name(evsel));
if (--nr == 0)
fprintf(fp, "}\n");
}
}
}
static void print_sample_time(struct feat_fd *ff, FILE *fp)
{
struct perf_session *session;
char time_buf[32];
double d;
session = container_of(ff->ph, struct perf_session, header);
timestamp__scnprintf_usec(session->evlist->first_sample_time,
time_buf, sizeof(time_buf));
fprintf(fp, "# time of first sample : %s\n", time_buf);
timestamp__scnprintf_usec(session->evlist->last_sample_time,
time_buf, sizeof(time_buf));
fprintf(fp, "# time of last sample : %s\n", time_buf);
d = (double)(session->evlist->last_sample_time -
session->evlist->first_sample_time) / NSEC_PER_MSEC;
fprintf(fp, "# sample duration : %10.3f ms\n", d);
}
static void memory_node__fprintf(struct memory_node *n,
unsigned long long bsize, FILE *fp)
{
char buf_map[100], buf_size[50];
unsigned long long size;
size = bsize * bitmap_weight(n->set, n->size);
unit_number__scnprintf(buf_size, 50, size);
bitmap_scnprintf(n->set, n->size, buf_map, 100);
fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
}
static void print_mem_topology(struct feat_fd *ff, FILE *fp)
{
struct memory_node *nodes;
int i, nr;
nodes = ff->ph->env.memory_nodes;
nr = ff->ph->env.nr_memory_nodes;
fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
nr, ff->ph->env.memory_bsize);
for (i = 0; i < nr; i++) {
memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
}
}
static int __event_process_build_id(struct perf_record_header_build_id *bev,
char *filename,
struct perf_session *session)
{
int err = -1;
struct machine *machine;
u16 cpumode;
struct dso *dso;
enum dso_space_type dso_space;
machine = perf_session__findnew_machine(session, bev->pid);
if (!machine)
goto out;
cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
switch (cpumode) {
case PERF_RECORD_MISC_KERNEL:
dso_space = DSO_SPACE__KERNEL;
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
dso_space = DSO_SPACE__KERNEL_GUEST;
break;
case PERF_RECORD_MISC_USER:
case PERF_RECORD_MISC_GUEST_USER:
dso_space = DSO_SPACE__USER;
break;
default:
goto out;
}
dso = machine__findnew_dso(machine, filename);
if (dso != NULL) {
char sbuild_id[SBUILD_ID_SIZE];
struct build_id bid;
size_t size = BUILD_ID_SIZE;
if (bev->header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE)
size = bev->size;
build_id__init(&bid, bev->data, size);
dso__set_build_id(dso, &bid);
dso->header_build_id = 1;
if (dso_space != DSO_SPACE__USER) {
struct kmod_path m = { .name = NULL, };
if (!kmod_path__parse_name(&m, filename) && m.kmod)
dso__set_module_info(dso, &m, machine);
dso->kernel = dso_space;
free(m.name);
}
build_id__sprintf(&dso->bid, sbuild_id);
pr_debug("build id event received for %s: %s [%zu]\n",
dso->long_name, sbuild_id, size);
dso__put(dso);
}
err = 0;
out:
return err;
}
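/*
* Read build-id events written with the old on-disk layout that predates
* the pid field in perf_record_header_build_id, see the comment in
* perf_header__read_build_ids() below.
*/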
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
int input, u64 offset, u64 size)
{
struct perf_session *session = container_of(header, struct perf_session, header);
struct {
struct perf_event_header header;
u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
char filename[0];
} old_bev;
struct perf_record_header_build_id bev;
char filename[PATH_MAX];
u64 limit = offset + size;
while (offset < limit) {
ssize_t len;
if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
return -1;
if (header->needs_swap)
perf_event_header__bswap(&old_bev.header);
len = old_bev.header.size - sizeof(old_bev);
if (readn(input, filename, len) != len)
return -1;
bev.header = old_bev.header;
/*
* As the pid is the missing value, we need to fill
* it properly. The header.misc value gives us a nice hint.
*/
bev.pid = HOST_KERNEL_ID;
if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
bev.pid = DEFAULT_GUEST_KERNEL_ID;
memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
__event_process_build_id(&bev, filename, session);
offset += bev.header.size;
}
return 0;
}
static int perf_header__read_build_ids(struct perf_header *header,
int input, u64 offset, u64 size)
{
struct perf_session *session = container_of(header, struct perf_session, header);
struct perf_record_header_build_id bev;
char filename[PATH_MAX];
u64 limit = offset + size, orig_offset = offset;
int err = -1;
while (offset < limit) {
ssize_t len;
if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
goto out;
if (header->needs_swap)
perf_event_header__bswap(&bev.header);
len = bev.header.size - sizeof(bev);
if (readn(input, filename, len) != len)
goto out;
/*
* The a1645ce1 changeset:
*
* "perf: 'perf kvm' tool for monitoring guest performance from host"
*
* Added a field to struct perf_record_header_build_id that broke the file
* format.
*
* Since the kernel build-id is the first entry, process the
* table using the old format if the well known
* '[kernel.kallsyms]' string for the kernel build-id has the
* first 4 characters chopped off (where the pid_t sits).
*/
if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
return -1;
return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
}
__event_process_build_id(&bev, filename, session);
offset += bev.header.size;
}
err = 0;
out:
return err;
}
/* Macro for features that simply need to read and store a string. */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
free(ff->ph->env.__feat_env); \
ff->ph->env.__feat_env = do_read_string(ff); \
return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}
FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);
#ifdef HAVE_LIBTRACEEVENT
static int process_tracing_data(struct feat_fd *ff, void *data)
{
ssize_t ret = trace_report(ff->fd, data, false);
return ret < 0 ? -1 : 0;
}
#endif
static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
pr_debug("Failed to read buildids, continuing...\n");
return 0;
}
static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
{
int ret;
u32 nr_cpus_avail, nr_cpus_online;
ret = do_read_u32(ff, &nr_cpus_avail);
if (ret)
return ret;
ret = do_read_u32(ff, &nr_cpus_online);
if (ret)
return ret;
ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
return 0;
}
static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
{
u64 total_mem;
int ret;
ret = do_read_u64(ff, &total_mem);
if (ret)
return -1;
ff->ph->env.total_mem = (unsigned long long)total_mem;
return 0;
}
static struct evsel *evlist__find_by_index(struct evlist *evlist, int idx)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.idx == idx)
return evsel;
}
return NULL;
}
static void evlist__set_event_name(struct evlist *evlist, struct evsel *event)
{
struct evsel *evsel;
if (!event->name)
return;
evsel = evlist__find_by_index(evlist, event->core.idx);
if (!evsel)
return;
if (evsel->name)
return;
evsel->name = strdup(event->name);
}
static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
struct perf_session *session;
struct evsel *evsel, *events = read_event_desc(ff);
if (!events)
return 0;
session = container_of(ff->ph, struct perf_session, header);
if (session->data->is_pipe) {
/* Save events for reading later by print_event_desc,
* since they can't be read again in pipe mode. */
ff->events = events;
}
for (evsel = events; evsel->core.attr.size; evsel++)
evlist__set_event_name(session->evlist, evsel);
if (!session->data->is_pipe)
free_event_desc(events);
return 0;
}
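/*
* Read the recorded command line: a u32 argument count followed by that
* many strings. The strings are packed into one contiguous buffer and an
* argv array pointing into it is built for perf_env.
*/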
static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
char *str, *cmdline = NULL, **argv = NULL;
u32 nr, i, len = 0;
if (do_read_u32(ff, &nr))
return -1;
ff->ph->env.nr_cmdline = nr;
cmdline = zalloc(ff->size + nr + 1);
if (!cmdline)
return -1;
argv = zalloc(sizeof(char *) * (nr + 1));
if (!argv)
goto error;
for (i = 0; i < nr; i++) {
str = do_read_string(ff);
if (!str)
goto error;
argv[i] = cmdline + len;
memcpy(argv[i], str, strlen(str) + 1);
len += strlen(str) + 1;
free(str);
}
ff->ph->env.cmdline = cmdline;
ff->ph->env.cmdline_argv = (const char **) argv;
return 0;
error:
free(argv);
free(cmdline);
return -1;
}
static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
u32 nr, i;
char *str;
struct strbuf sb;
int cpu_nr = ff->ph->env.nr_cpus_avail;
u64 size = 0;
struct perf_header *ph = ff->ph;
bool do_core_id_test = true;
ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
if (!ph->env.cpu)
return -1;
if (do_read_u32(ff, &nr))
goto free_cpu;
ph->env.nr_sibling_cores = nr;
size += sizeof(u32);
if (strbuf_init(&sb, 128) < 0)
goto free_cpu;
for (i = 0; i < nr; i++) {
str = do_read_string(ff);
if (!str)
goto error;
/* include a NULL character at the end */
if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
goto error;
size += string_size(str);
free(str);
}
ph->env.sibling_cores = strbuf_detach(&sb, NULL);
if (do_read_u32(ff, &nr))
return -1;
ph->env.nr_sibling_threads = nr;
size += sizeof(u32);
for (i = 0; i < nr; i++) {
str = do_read_string(ff);
if (!str)
goto error;
/* include a NULL character at the end */
if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
goto error;
size += string_size(str);
free(str);
}
ph->env.sibling_threads = strbuf_detach(&sb, NULL);
/*
* The header may be from old perf,
* which doesn't include core id and socket id information.
*/
if (ff->size <= size) {
zfree(&ph->env.cpu);
return 0;
}
/* On s390 the socket_id number is not related to the number of cpus.
* The socket_id number might be higher than the number of cpus.
* This depends on the configuration.
* AArch64 is the same.
*/
if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
|| !strncmp(ph->env.arch, "aarch64", 7)))
do_core_id_test = false;
for (i = 0; i < (u32)cpu_nr; i++) {
if (do_read_u32(ff, &nr))
goto free_cpu;
ph->env.cpu[i].core_id = nr;
size += sizeof(u32);
if (do_read_u32(ff, &nr))
goto free_cpu;
if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
pr_debug("socket_id number is too big. "
"You may need to upgrade the perf tool.\n");
goto free_cpu;
}
ph->env.cpu[i].socket_id = nr;
size += sizeof(u32);
}
/*
* The header may be from old perf,
* which doesn't include die information.
*/
if (ff->size <= size)
return 0;
if (do_read_u32(ff, &nr))
return -1;
ph->env.nr_sibling_dies = nr;
size += sizeof(u32);
for (i = 0; i < nr; i++) {
str = do_read_string(ff);
if (!str)
goto error;
/* include a NULL character at the end */
if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
goto error;
size += string_size(str);
free(str);
}
ph->env.sibling_dies = strbuf_detach(&sb, NULL);
for (i = 0; i < (u32)cpu_nr; i++) {
if (do_read_u32(ff, &nr))
goto free_cpu;
ph->env.cpu[i].die_id = nr;
}
return 0;
error:
strbuf_release(&sb);
free_cpu:
zfree(&ph->env.cpu);
return -1;
}
static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
{
struct numa_node *nodes, *n;
u32 nr, i;
char *str;
/* nr nodes */
if (do_read_u32(ff, &nr))
return -1;
nodes = zalloc(sizeof(*nodes) * nr);
if (!nodes)
return -ENOMEM;
for (i = 0; i < nr; i++) {
n = &nodes[i];
/* node number */
if (do_read_u32(ff, &n->node))
goto error;
if (do_read_u64(ff, &n->mem_total))
goto error;
if (do_read_u64(ff, &n->mem_free))
goto error;
str = do_read_string(ff);
if (!str)
goto error;
n->map = perf_cpu_map__new(str);
if (!n->map)
goto error;
free(str);
}
ff->ph->env.nr_numa_nodes = nr;
ff->ph->env.numa_nodes = nodes;
return 0;
error:
free(nodes);
return -1;
}
static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
{
char *name;
u32 pmu_num;
u32 type;
struct strbuf sb;
if (do_read_u32(ff, &pmu_num))
return -1;
if (!pmu_num) {
pr_debug("pmu mappings not available\n");
return 0;
}
ff->ph->env.nr_pmu_mappings = pmu_num;
if (strbuf_init(&sb, 128) < 0)
return -1;
while (pmu_num) {
if (do_read_u32(ff, &type))
goto error;
name = do_read_string(ff);
if (!name)
goto error;
if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
goto error;
/* include a NULL character at the end */
if (strbuf_add(&sb, "", 1) < 0)
goto error;
if (!strcmp(name, "msr"))
ff->ph->env.msr_pmu_type = type;
free(name);
pmu_num--;
}
ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
return 0;
error:
strbuf_release(&sb);
return -1;
}
static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
{
size_t ret = -1;
u32 i, nr, nr_groups;
struct perf_session *session;
struct evsel *evsel, *leader = NULL;
struct group_desc {
char *name;
u32 leader_idx;
u32 nr_members;
} *desc;
if (do_read_u32(ff, &nr_groups))
return -1;
ff->ph->env.nr_groups = nr_groups;
if (!nr_groups) {
pr_debug("group desc not available\n");
return 0;
}
desc = calloc(nr_groups, sizeof(*desc));
if (!desc)
return -1;
for (i = 0; i < nr_groups; i++) {
desc[i].name = do_read_string(ff);
if (!desc[i].name)
goto out_free;
if (do_read_u32(ff, &desc[i].leader_idx))
goto out_free;
if (do_read_u32(ff, &desc[i].nr_members))
goto out_free;
}
/*
* Rebuild group relationship based on the group_desc
*/
session = container_of(ff->ph, struct perf_session, header);
i = nr = 0;
evlist__for_each_entry(session->evlist, evsel) {
if (i < nr_groups && evsel->core.idx == (int) desc[i].leader_idx) {
evsel__set_leader(evsel, evsel);
/* {anon_group} is a dummy name */
if (strcmp(desc[i].name, "{anon_group}")) {
evsel->group_name = desc[i].name;
desc[i].name = NULL;
}
evsel->core.nr_members = desc[i].nr_members;
if (i >= nr_groups || nr > 0) {
pr_debug("invalid group desc\n");
goto out_free;
}
leader = evsel;
nr = evsel->core.nr_members - 1;
i++;
} else if (nr) {
/* This is a group member */
evsel__set_leader(evsel, leader);
nr--;
}
}
if (i != nr_groups || nr != 0) {
pr_debug("invalid group desc\n");
goto out_free;
}
ret = 0;
out_free:
for (i = 0; i < nr_groups; i++)
zfree(&desc[i].name);
free(desc);
return ret;
}
static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
{
struct perf_session *session;
int err;
session = container_of(ff->ph, struct perf_session, header);
err = auxtrace_index__process(ff->fd, ff->size, session,
ff->ph->needs_swap);
if (err < 0)
pr_err("Failed to process auxtrace index\n");
return err;
}
static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
{
struct cpu_cache_level *caches;
u32 cnt, i, version;
if (do_read_u32(ff, &version))
return -1;
if (version != 1)
return -1;
if (do_read_u32(ff, &cnt))
return -1;
caches = zalloc(sizeof(*caches) * cnt);
if (!caches)
return -1;
for (i = 0; i < cnt; i++) {
struct cpu_cache_level c;
#define _R(v) \
if (do_read_u32(ff, &c.v))\
goto out_free_caches; \
_R(level)
_R(line_size)
_R(sets)
_R(ways)
#undef _R
#define _R(v) \
c.v = do_read_string(ff); \
if (!c.v) \
goto out_free_caches;
_R(type)
_R(size)
_R(map)
#undef _R
caches[i] = c;
}
ff->ph->env.caches = caches;
ff->ph->env.caches_cnt = cnt;
return 0;
out_free_caches:
free(caches);
return -1;
}
static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
{
struct perf_session *session;
u64 first_sample_time, last_sample_time;
int ret;
session = container_of(ff->ph, struct perf_session, header);
ret = do_read_u64(ff, &first_sample_time);
if (ret)
return -1;
ret = do_read_u64(ff, &last_sample_time);
if (ret)
return -1;
session->evlist->first_sample_time = first_sample_time;
session->evlist->last_sample_time = last_sample_time;
return 0;
}
static int process_mem_topology(struct feat_fd *ff,
void *data __maybe_unused)
{
struct memory_node *nodes;
u64 version, i, nr, bsize;
int ret = -1;
if (do_read_u64(ff, &version))
return -1;
if (version != 1)
return -1;
if (do_read_u64(ff, &bsize))
return -1;
if (do_read_u64(ff, &nr))
return -1;
nodes = zalloc(sizeof(*nodes) * nr);
if (!nodes)
return -1;
for (i = 0; i < nr; i++) {
struct memory_node n;
#define _R(v) \
if (do_read_u64(ff, &n.v)) \
goto out; \
_R(node)
_R(size)
#undef _R
if (do_read_bitmap(ff, &n.set, &n.size))
goto out;
nodes[i] = n;
}
ff->ph->env.memory_bsize = bsize;
ff->ph->env.memory_nodes = nodes;
ff->ph->env.nr_memory_nodes = nr;
ret = 0;
out:
if (ret)
free(nodes);
return ret;
}
static int process_clockid(struct feat_fd *ff,
void *data __maybe_unused)
{
if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns))
return -1;
return 0;
}
static int process_clock_data(struct feat_fd *ff,
void *_data __maybe_unused)
{
u32 data32;
u64 data64;
/* version */
if (do_read_u32(ff, &data32))
return -1;
if (data32 != 1)
return -1;
/* clockid */
if (do_read_u32(ff, &data32))
return -1;
ff->ph->env.clock.clockid = data32;
/* TOD ref time */
if (do_read_u64(ff, &data64))
return -1;
ff->ph->env.clock.tod_ns = data64;
/* clockid ref time */
if (do_read_u64(ff, &data64))
return -1;
ff->ph->env.clock.clockid_ns = data64;
ff->ph->env.clock.enabled = true;
return 0;
}
static int process_hybrid_topology(struct feat_fd *ff,
void *data __maybe_unused)
{
struct hybrid_node *nodes, *n;
u32 nr, i;
/* nr nodes */
if (do_read_u32(ff, &nr))
return -1;
nodes = zalloc(sizeof(*nodes) * nr);
if (!nodes)
return -ENOMEM;
for (i = 0; i < nr; i++) {
n = &nodes[i];
n->pmu_name = do_read_string(ff);
if (!n->pmu_name)
goto error;
n->cpus = do_read_string(ff);
if (!n->cpus)
goto error;
}
ff->ph->env.nr_hybrid_nodes = nr;
ff->ph->env.hybrid_nodes = nodes;
return 0;
error:
for (i = 0; i < nr; i++) {
free(nodes[i].pmu_name);
free(nodes[i].cpus);
}
free(nodes);
return -1;
}
static int process_dir_format(struct feat_fd *ff,
void *_data __maybe_unused)
{
struct perf_session *session;
struct perf_data *data;
session = container_of(ff->ph, struct perf_session, header);
data = session->data;
if (WARN_ON(!perf_data__is_dir(data)))
return -1;
return do_read_u64(ff, &data->dir.version);
}
#ifdef HAVE_LIBBPF_SUPPORT
static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
{
struct bpf_prog_info_node *info_node;
struct perf_env *env = &ff->ph->env;
struct perf_bpil *info_linear;
u32 count, i;
int err = -1;
if (ff->ph->needs_swap) {
pr_warning("interpreting bpf_prog_info from systems with a different endianness is not yet supported\n");
return 0;
}
if (do_read_u32(ff, &count))
return -1;
down_write(&env->bpf_progs.lock);
for (i = 0; i < count; ++i) {
u32 info_len, data_len;
info_linear = NULL;
info_node = NULL;
if (do_read_u32(ff, &info_len))
goto out;
if (do_read_u32(ff, &data_len))
goto out;
if (info_len > sizeof(struct bpf_prog_info)) {
pr_warning("detected invalid bpf_prog_info\n");
goto out;
}
info_linear = malloc(sizeof(struct perf_bpil) +
data_len);
if (!info_linear)
goto out;
info_linear->info_len = sizeof(struct bpf_prog_info);
info_linear->data_len = data_len;
if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
goto out;
if (__do_read(ff, &info_linear->info, info_len))
goto out;
if (info_len < sizeof(struct bpf_prog_info))
memset(((void *)(&info_linear->info)) + info_len, 0,
sizeof(struct bpf_prog_info) - info_len);
if (__do_read(ff, info_linear->data, data_len))
goto out;
info_node = malloc(sizeof(struct bpf_prog_info_node));
if (!info_node)
goto out;
/* after reading from file, translate offset to address */
bpil_offs_to_addr(info_linear);
info_node->info_linear = info_linear;
perf_env__insert_bpf_prog_info(env, info_node);
}
up_write(&env->bpf_progs.lock);
return 0;
out:
free(info_linear);
free(info_node);
up_write(&env->bpf_progs.lock);
return err;
}
static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
{
struct perf_env *env = &ff->ph->env;
struct btf_node *node = NULL;
u32 count, i;
int err = -1;
if (ff->ph->needs_swap) {
pr_warning("interpreting btf from systems with a different endianness is not yet supported\n");
return 0;
}
if (do_read_u32(ff, &count))
return -1;
down_write(&env->bpf_progs.lock);
for (i = 0; i < count; ++i) {
u32 id, data_size;
if (do_read_u32(ff, &id))
goto out;
if (do_read_u32(ff, &data_size))
goto out;
node = malloc(sizeof(struct btf_node) + data_size);
if (!node)
goto out;
node->id = id;
node->data_size = data_size;
if (__do_read(ff, node->data, data_size))
goto out;
perf_env__insert_btf(env, node);
node = NULL;
}
err = 0;
out:
up_write(&env->bpf_progs.lock);
free(node);
return err;
}
#endif // HAVE_LIBBPF_SUPPORT
static int process_compressed(struct feat_fd *ff,
void *data __maybe_unused)
{
if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
return -1;
if (do_read_u32(ff, &(ff->ph->env.comp_type)))
return -1;
if (do_read_u32(ff, &(ff->ph->env.comp_level)))
return -1;
if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
return -1;
if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
return -1;
return 0;
}
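/*
 * Read a list of "name=value" PMU capability strings from the header.
 * Each capability is stored as a single "name=value" string in *caps;
 * the special "branches" capability also updates *max_branches.
 */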
static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps,
char ***caps, unsigned int *max_branches)
{
char *name, *value, *ptr;
u32 nr_pmu_caps, i;
*nr_caps = 0;
*caps = NULL;
if (do_read_u32(ff, &nr_pmu_caps))
return -1;
if (!nr_pmu_caps)
return 0;
*caps = zalloc(sizeof(char *) * nr_pmu_caps);
if (!*caps)
return -1;
for (i = 0; i < nr_pmu_caps; i++) {
name = do_read_string(ff);
if (!name)
goto error;
value = do_read_string(ff);
if (!value)
goto free_name;
if (asprintf(&ptr, "%s=%s", name, value) < 0)
goto free_value;
(*caps)[i] = ptr;
if (!strcmp(name, "branches"))
*max_branches = atoi(value);
free(value);
free(name);
}
*nr_caps = nr_pmu_caps;
return 0;
free_value:
free(value);
free_name:
free(name);
error:
for (; i > 0; i--)
free((*caps)[i - 1]);
free(*caps);
*caps = NULL;
*nr_caps = 0;
return -1;
}
static int process_cpu_pmu_caps(struct feat_fd *ff,
void *data __maybe_unused)
{
int ret = __process_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
&ff->ph->env.cpu_pmu_caps,
&ff->ph->env.max_branches);
if (!ret && !ff->ph->env.cpu_pmu_caps)
pr_debug("cpu pmu capabilities not available\n");
return ret;
}
static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
{
struct pmu_caps *pmu_caps;
u32 nr_pmu, i;
int ret;
int j;
if (do_read_u32(ff, &nr_pmu))
return -1;
if (!nr_pmu) {
pr_debug("pmu capabilities not available\n");
return 0;
}
pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
if (!pmu_caps)
return -ENOMEM;
for (i = 0; i < nr_pmu; i++) {
ret = __process_pmu_caps(ff, &pmu_caps[i].nr_caps,
&pmu_caps[i].caps,
&pmu_caps[i].max_branches);
if (ret)
goto err;
pmu_caps[i].pmu_name = do_read_string(ff);
if (!pmu_caps[i].pmu_name) {
ret = -1;
goto err;
}
if (!pmu_caps[i].nr_caps) {
pr_debug("%s pmu capabilities not available\n",
pmu_caps[i].pmu_name);
}
}
ff->ph->env.nr_pmus_with_caps = nr_pmu;
ff->ph->env.pmu_caps = pmu_caps;
return 0;
err:
for (i = 0; i < nr_pmu; i++) {
for (j = 0; j < pmu_caps[i].nr_caps; j++)
free(pmu_caps[i].caps[j]);
free(pmu_caps[i].caps);
free(pmu_caps[i].pmu_name);
}
free(pmu_caps);
return ret;
}
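/*
 * FEAT_OPR additionally sets .synthesize so the feature can be emitted as a
 * header-feature event in pipe mode; FEAT_OPN leaves it unset.
 */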
#define FEAT_OPR(n, func, __full_only) \
[HEADER_##n] = { \
.name = __stringify(n), \
.write = write_##func, \
.print = print_##func, \
.full_only = __full_only, \
.process = process_##func, \
.synthesize = true \
}
#define FEAT_OPN(n, func, __full_only) \
[HEADER_##n] = { \
.name = __stringify(n), \
.write = write_##func, \
.print = print_##func, \
.full_only = __full_only, \
.process = process_##func \
}
/* feature_ops not implemented: */
#define print_tracing_data NULL
#define print_build_id NULL
#define process_branch_stack NULL
#define process_stat NULL
// Only used in util/synthetic-events.c
const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
#ifdef HAVE_LIBTRACEEVENT
FEAT_OPN(TRACING_DATA, tracing_data, false),
#endif
FEAT_OPN(BUILD_ID, build_id, false),
FEAT_OPR(HOSTNAME, hostname, false),
FEAT_OPR(OSRELEASE, osrelease, false),
FEAT_OPR(VERSION, version, false),
FEAT_OPR(ARCH, arch, false),
FEAT_OPR(NRCPUS, nrcpus, false),
FEAT_OPR(CPUDESC, cpudesc, false),
FEAT_OPR(CPUID, cpuid, false),
FEAT_OPR(TOTAL_MEM, total_mem, false),
FEAT_OPR(EVENT_DESC, event_desc, false),
FEAT_OPR(CMDLINE, cmdline, false),
FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
FEAT_OPN(BRANCH_STACK, branch_stack, false),
FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
FEAT_OPR(GROUP_DESC, group_desc, false),
FEAT_OPN(AUXTRACE, auxtrace, false),
FEAT_OPN(STAT, stat, false),
FEAT_OPN(CACHE, cache, true),
FEAT_OPR(SAMPLE_TIME, sample_time, false),
FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
FEAT_OPR(CLOCKID, clockid, false),
FEAT_OPN(DIR_FORMAT, dir_format, false),
#ifdef HAVE_LIBBPF_SUPPORT
FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
FEAT_OPR(BPF_BTF, bpf_btf, false),
#endif
FEAT_OPR(COMPRESSED, compressed, false),
FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
FEAT_OPR(CLOCK_DATA, clock_data, false),
FEAT_OPN(HYBRID_TOPOLOGY, hybrid_topology, true),
FEAT_OPR(PMU_CAPS, pmu_caps, false),
};
struct header_print_data {
FILE *fp;
bool full; /* extended list of headers */
};
static int perf_file_section__fprintf_info(struct perf_file_section *section,
struct perf_header *ph,
int feat, int fd, void *data)
{
struct header_print_data *hd = data;
struct feat_fd ff;
if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
"%d, continuing...\n", section->offset, feat);
return 0;
}
if (feat >= HEADER_LAST_FEATURE) {
pr_warning("unknown feature %d\n", feat);
return 0;
}
if (!feat_ops[feat].print)
return 0;
ff = (struct feat_fd) {
.fd = fd,
.ph = ph,
};
if (!feat_ops[feat].full_only || hd->full)
feat_ops[feat].print(&ff, hd->fp);
else
fprintf(hd->fp, "# %s info available, use -I to display\n",
feat_ops[feat].name);
return 0;
}
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
struct header_print_data hd;
struct perf_header *header = &session->header;
int fd = perf_data__fd(session->data);
struct stat st;
time_t stctime;
int ret, bit;
hd.fp = fp;
hd.full = full;
ret = fstat(fd, &st);
if (ret == -1)
return -1;
stctime = st.st_mtime;
fprintf(fp, "# captured on : %s", ctime(&stctime));
fprintf(fp, "# header version : %u\n", header->version);
fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
perf_header__process_sections(header, fd, &hd,
perf_file_section__fprintf_info);
if (session->data->is_pipe)
return 0;
fprintf(fp, "# missing features: ");
for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
if (bit)
fprintf(fp, "%s ", feat_ops[bit].name);
}
fprintf(fp, "\n");
return 0;
}
struct header_fw {
struct feat_writer fw;
struct feat_fd *ff;
};
static int feat_writer_cb(struct feat_writer *fw, void *buf, size_t sz)
{
struct header_fw *h = container_of(fw, struct header_fw, fw);
return do_write(h->ff, buf, sz);
}
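/*
 * Write a single feature section: record its file offset, let an optional
 * feature copier (used by perf inject) provide the data, otherwise call the
 * feature's ->write() callback, then store the resulting section size.
 */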
static int do_write_feat(struct feat_fd *ff, int type,
struct perf_file_section **p,
struct evlist *evlist,
struct feat_copier *fc)
{
int err;
int ret = 0;
if (perf_header__has_feat(ff->ph, type)) {
if (!feat_ops[type].write)
return -1;
if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
return -1;
(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
/*
		 * Hook to let perf inject copy feature sections from the input
		 * file.
*/
if (fc && fc->copy) {
struct header_fw h = {
.fw.write = feat_writer_cb,
.ff = ff,
};
/* ->copy() returns 0 if the feature was not copied */
err = fc->copy(fc, type, &h.fw);
} else {
err = 0;
}
if (!err)
err = feat_ops[type].write(ff, evlist);
if (err < 0) {
pr_debug("failed to write feature %s\n", feat_ops[type].name);
/* undo anything written */
lseek(ff->fd, (*p)->offset, SEEK_SET);
return -1;
}
(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
(*p)++;
}
return ret;
}
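/*
 * Write all additional (per-feature) sections: first the feature payloads,
 * which start right after the section descriptor table, then the table of
 * section descriptors itself at header->feat_offset.
 */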
static int perf_header__adds_write(struct perf_header *header,
struct evlist *evlist, int fd,
struct feat_copier *fc)
{
int nr_sections;
struct feat_fd ff;
struct perf_file_section *feat_sec, *p;
int sec_size;
u64 sec_start;
int feat;
int err;
ff = (struct feat_fd){
.fd = fd,
.ph = header,
};
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
return 0;
feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
if (feat_sec == NULL)
return -ENOMEM;
sec_size = sizeof(*feat_sec) * nr_sections;
sec_start = header->feat_offset;
lseek(fd, sec_start + sec_size, SEEK_SET);
for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
if (do_write_feat(&ff, feat, &p, evlist, fc))
perf_header__clear_feat(header, feat);
}
lseek(fd, sec_start, SEEK_SET);
/*
	 * We may write more than needed due to a dropped feature, but
	 * this is okay: the reader will skip the missing entries.
*/
err = do_write(&ff, feat_sec, sec_size);
if (err < 0)
pr_debug("failed to write feature section\n");
free(feat_sec);
return err;
}
int perf_header__write_pipe(int fd)
{
struct perf_pipe_file_header f_header;
struct feat_fd ff;
int err;
ff = (struct feat_fd){ .fd = fd };
f_header = (struct perf_pipe_file_header){
.magic = PERF_MAGIC,
.size = sizeof(f_header),
};
err = do_write(&ff, &f_header, sizeof(f_header));
if (err < 0) {
pr_debug("failed to write perf pipe header\n");
return err;
}
return 0;
}
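/*
 * Write the on-disk perf.data layout: the sample ids for each evsel, the
 * perf_file_attr table, optionally the feature sections (when called at
 * exit), and finally the perf_file_header itself at offset 0.
 */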
static int perf_session__do_write_header(struct perf_session *session,
struct evlist *evlist,
int fd, bool at_exit,
struct feat_copier *fc)
{
struct perf_file_header f_header;
struct perf_file_attr f_attr;
struct perf_header *header = &session->header;
struct evsel *evsel;
struct feat_fd ff;
u64 attr_offset;
int err;
ff = (struct feat_fd){ .fd = fd};
lseek(fd, sizeof(f_header), SEEK_SET);
evlist__for_each_entry(session->evlist, evsel) {
evsel->id_offset = lseek(fd, 0, SEEK_CUR);
err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
if (err < 0) {
pr_debug("failed to write perf header\n");
return err;
}
}
attr_offset = lseek(ff.fd, 0, SEEK_CUR);
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.size < sizeof(evsel->core.attr)) {
/*
* We are likely in "perf inject" and have read
* from an older file. Update attr size so that
* reader gets the right offset to the ids.
*/
evsel->core.attr.size = sizeof(evsel->core.attr);
}
f_attr = (struct perf_file_attr){
.attr = evsel->core.attr,
.ids = {
.offset = evsel->id_offset,
.size = evsel->core.ids * sizeof(u64),
}
};
err = do_write(&ff, &f_attr, sizeof(f_attr));
if (err < 0) {
pr_debug("failed to write perf header attribute\n");
return err;
}
}
if (!header->data_offset)
header->data_offset = lseek(fd, 0, SEEK_CUR);
header->feat_offset = header->data_offset + header->data_size;
if (at_exit) {
err = perf_header__adds_write(header, evlist, fd, fc);
if (err < 0)
return err;
}
f_header = (struct perf_file_header){
.magic = PERF_MAGIC,
.size = sizeof(f_header),
.attr_size = sizeof(f_attr),
.attrs = {
.offset = attr_offset,
.size = evlist->core.nr_entries * sizeof(f_attr),
},
.data = {
.offset = header->data_offset,
.size = header->data_size,
},
/* event_types is ignored, store zeros */
};
memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
lseek(fd, 0, SEEK_SET);
err = do_write(&ff, &f_header, sizeof(f_header));
if (err < 0) {
pr_debug("failed to write perf header\n");
return err;
}
lseek(fd, header->data_offset + header->data_size, SEEK_SET);
return 0;
}
int perf_session__write_header(struct perf_session *session,
struct evlist *evlist,
int fd, bool at_exit)
{
return perf_session__do_write_header(session, evlist, fd, at_exit, NULL);
}
size_t perf_session__data_offset(const struct evlist *evlist)
{
struct evsel *evsel;
size_t data_offset;
data_offset = sizeof(struct perf_file_header);
evlist__for_each_entry(evlist, evsel) {
data_offset += evsel->core.ids * sizeof(u64);
}
data_offset += evlist->core.nr_entries * sizeof(struct perf_file_attr);
return data_offset;
}
int perf_session__inject_header(struct perf_session *session,
struct evlist *evlist,
int fd,
struct feat_copier *fc)
{
return perf_session__do_write_header(session, evlist, fd, true, fc);
}
static int perf_header__getbuffer64(struct perf_header *header,
int fd, void *buf, size_t size)
{
if (readn(fd, buf, size) <= 0)
return -1;
if (header->needs_swap)
mem_bswap_64(buf, size);
return 0;
}
int perf_header__process_sections(struct perf_header *header, int fd,
void *data,
int (*process)(struct perf_file_section *section,
struct perf_header *ph,
int feat, int fd, void *data))
{
struct perf_file_section *feat_sec, *sec;
int nr_sections;
int sec_size;
int feat;
int err;
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
return 0;
feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
if (!feat_sec)
return -1;
sec_size = sizeof(*feat_sec) * nr_sections;
lseek(fd, header->feat_offset, SEEK_SET);
err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
if (err < 0)
goto out_free;
for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
err = process(sec++, header, feat, fd, data);
if (err < 0)
goto out_free;
}
err = 0;
out_free:
free(feat_sec);
return err;
}
static const int attr_file_abi_sizes[] = {
[0] = PERF_ATTR_SIZE_VER0,
[1] = PERF_ATTR_SIZE_VER1,
[2] = PERF_ATTR_SIZE_VER2,
[3] = PERF_ATTR_SIZE_VER3,
[4] = PERF_ATTR_SIZE_VER4,
0,
};
/*
 * In the legacy file format, the magic number does not encode endianness;
 * hdr_sz was used for that instead. But since hdr_sz varies with the ABI
 * revision, we have to try every known header size in both byte orders to
 * detect the endianness.
*/
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
uint64_t ref_size, attr_size;
int i;
for (i = 0 ; attr_file_abi_sizes[i]; i++) {
ref_size = attr_file_abi_sizes[i]
+ sizeof(struct perf_file_section);
if (hdr_sz != ref_size) {
attr_size = bswap_64(hdr_sz);
if (attr_size != ref_size)
continue;
ph->needs_swap = true;
}
pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
i,
ph->needs_swap);
return 0;
}
/* could not determine endianness */
return -1;
}
#define PERF_PIPE_HDR_VER0 16
static const size_t attr_pipe_abi_sizes[] = {
[0] = PERF_PIPE_HDR_VER0,
0,
};
/*
 * The legacy pipe format implicitly assumes that the host recording the
 * samples and the host parsing them have the same endianness. That is not
 * always true: the pipe output can be redirected into a file and analyzed on
 * a different machine, with a different endianness and a different
 * perf_event ABI revision in the perf tool itself.
*/
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
u64 attr_size;
int i;
for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
if (hdr_sz != attr_pipe_abi_sizes[i]) {
attr_size = bswap_64(hdr_sz);
if (attr_size != hdr_sz)
continue;
ph->needs_swap = true;
}
pr_debug("Pipe ABI%d perf.data file detected\n", i);
return 0;
}
return -1;
}
bool is_perf_magic(u64 magic)
{
if (!memcmp(&magic, __perf_magic1, sizeof(magic))
|| magic == __perf_magic2
|| magic == __perf_magic2_sw)
return true;
return false;
}
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
bool is_pipe, struct perf_header *ph)
{
int ret;
/* check for legacy format */
ret = memcmp(&magic, __perf_magic1, sizeof(magic));
if (ret == 0) {
ph->version = PERF_HEADER_VERSION_1;
pr_debug("legacy perf.data format\n");
if (is_pipe)
return try_all_pipe_abis(hdr_sz, ph);
return try_all_file_abis(hdr_sz, ph);
}
/*
* the new magic number serves two purposes:
* - unique number to identify actual perf.data files
* - encode endianness of file
*/
ph->version = PERF_HEADER_VERSION_2;
/* check magic number with one endianness */
if (magic == __perf_magic2)
return 0;
/* check magic number with opposite endianness */
if (magic != __perf_magic2_sw)
return -1;
ph->needs_swap = true;
return 0;
}
int perf_file_header__read(struct perf_file_header *header,
struct perf_header *ph, int fd)
{
ssize_t ret;
lseek(fd, 0, SEEK_SET);
ret = readn(fd, header, sizeof(*header));
if (ret <= 0)
return -1;
if (check_magic_endian(header->magic,
header->attr_size, false, ph) < 0) {
pr_debug("magic/endian check failed\n");
return -1;
}
if (ph->needs_swap) {
mem_bswap_64(header, offsetof(struct perf_file_header,
adds_features));
}
if (header->size != sizeof(*header)) {
/* Support the previous format */
if (header->size == offsetof(typeof(*header), adds_features))
bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
else
return -1;
} else if (ph->needs_swap) {
/*
* feature bitmap is declared as an array of unsigned longs --
* not good since its size can differ between the host that
* generated the data file and the host analyzing the file.
*
* We need to handle endianness, but we don't know the size of
* the unsigned long where the file was generated. Take a best
* guess at determining it: try 64-bit swap first (ie., file
* created on a 64-bit host), and check if the hostname feature
* bit is set (this feature bit is forced on as of fbe96f2).
* If the bit is not, undo the 64-bit swap and try a 32-bit
* swap. If the hostname bit is still not set (e.g., older data
* file), punt and fallback to the original behavior --
* clearing all feature bits and setting buildid.
*/
mem_bswap_64(&header->adds_features,
BITS_TO_U64(HEADER_FEAT_BITS));
if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
/* unswap as u64 */
mem_bswap_64(&header->adds_features,
BITS_TO_U64(HEADER_FEAT_BITS));
/* unswap as u32 */
mem_bswap_32(&header->adds_features,
BITS_TO_U32(HEADER_FEAT_BITS));
}
if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
__set_bit(HEADER_BUILD_ID, header->adds_features);
}
}
memcpy(&ph->adds_features, &header->adds_features,
sizeof(ph->adds_features));
ph->data_offset = header->data.offset;
ph->data_size = header->data.size;
ph->feat_offset = header->data.offset + header->data.size;
return 0;
}
static int perf_file_section__process(struct perf_file_section *section,
struct perf_header *ph,
int feat, int fd, void *data)
{
struct feat_fd fdd = {
.fd = fd,
.ph = ph,
.size = section->size,
.offset = section->offset,
};
if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
"%d, continuing...\n", section->offset, feat);
return 0;
}
if (feat >= HEADER_LAST_FEATURE) {
pr_debug("unknown feature %d, continuing...\n", feat);
return 0;
}
if (!feat_ops[feat].process)
return 0;
return feat_ops[feat].process(&fdd, data);
}
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
struct perf_header *ph,
struct perf_data* data,
bool repipe, int repipe_fd)
{
struct feat_fd ff = {
.fd = repipe_fd,
.ph = ph,
};
ssize_t ret;
ret = perf_data__read(data, header, sizeof(*header));
if (ret <= 0)
return -1;
if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
pr_debug("endian/magic failed\n");
return -1;
}
if (ph->needs_swap)
header->size = bswap_64(header->size);
if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
return -1;
return 0;
}
static int perf_header__read_pipe(struct perf_session *session, int repipe_fd)
{
struct perf_header *header = &session->header;
struct perf_pipe_file_header f_header;
if (perf_file_header__read_pipe(&f_header, header, session->data,
session->repipe, repipe_fd) < 0) {
pr_debug("incompatible file format\n");
return -EINVAL;
}
return f_header.size == sizeof(f_header) ? 0 : -1;
}
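/*
 * Read one perf_event_attr plus its ids section descriptor, coping with
 * on-file attr sizes from older ABIs (smaller than the running tool's).
 */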
static int read_attr(int fd, struct perf_header *ph,
struct perf_file_attr *f_attr)
{
struct perf_event_attr *attr = &f_attr->attr;
size_t sz, left;
size_t our_sz = sizeof(f_attr->attr);
ssize_t ret;
memset(f_attr, 0, sizeof(*f_attr));
/* read minimal guaranteed structure */
ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
if (ret <= 0) {
pr_debug("cannot read %d bytes of header attr\n",
PERF_ATTR_SIZE_VER0);
return -1;
}
/* on file perf_event_attr size */
sz = attr->size;
if (ph->needs_swap)
sz = bswap_32(sz);
if (sz == 0) {
/* assume ABI0 */
sz = PERF_ATTR_SIZE_VER0;
} else if (sz > our_sz) {
pr_debug("file uses a more recent and unsupported ABI"
" (%zu bytes extra)\n", sz - our_sz);
return -1;
}
/* what we have not yet read and that we know about */
left = sz - PERF_ATTR_SIZE_VER0;
if (left) {
void *ptr = attr;
ptr += PERF_ATTR_SIZE_VER0;
ret = readn(fd, ptr, left);
}
/* read perf_file_section, ids are read in caller */
ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
return ret <= 0 ? -1 : 0;
}
#ifdef HAVE_LIBTRACEEVENT
static int evsel__prepare_tracepoint_event(struct evsel *evsel, struct tep_handle *pevent)
{
struct tep_event *event;
char bf[128];
/* already prepared */
if (evsel->tp_format)
return 0;
if (pevent == NULL) {
pr_debug("broken or missing trace data\n");
return -1;
}
event = tep_find_event(pevent, evsel->core.attr.config);
if (event == NULL) {
pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
return -1;
}
if (!evsel->name) {
snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
evsel->name = strdup(bf);
if (evsel->name == NULL)
return -1;
}
evsel->tp_format = event;
return 0;
}
static int evlist__prepare_tracepoint_events(struct evlist *evlist, struct tep_handle *pevent)
{
struct evsel *pos;
evlist__for_each_entry(evlist, pos) {
if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
evsel__prepare_tracepoint_event(pos, pevent))
return -1;
}
return 0;
}
#endif
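/*
 * Read a perf.data header: detect pipe-mode data first, otherwise parse the
 * file header, build the evlist from the on-disk attr table and per-event
 * sample ids, and process the feature sections.
 */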
int perf_session__read_header(struct perf_session *session, int repipe_fd)
{
struct perf_data *data = session->data;
struct perf_header *header = &session->header;
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
int nr_attrs, nr_ids, i, j, err;
int fd = perf_data__fd(data);
session->evlist = evlist__new();
if (session->evlist == NULL)
return -ENOMEM;
session->evlist->env = &header->env;
session->machines.host.env = &header->env;
/*
* We can read 'pipe' data event from regular file,
* check for the pipe header regardless of source.
*/
err = perf_header__read_pipe(session, repipe_fd);
if (!err || perf_data__is_pipe(data)) {
data->is_pipe = true;
return err;
}
if (perf_file_header__read(&f_header, header, fd) < 0)
return -EINVAL;
if (header->needs_swap && data->in_place_update) {
pr_err("In-place update not supported when byte-swapping is required\n");
return -EINVAL;
}
/*
* Sanity check that perf.data was written cleanly; data size is
* initialized to 0 and updated only if the on_exit function is run.
* If data size is still 0 then the file contains only partial
	 * information. Just warn the user and process as much of it as we can.
*/
if (f_header.data.size == 0) {
pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
"Was the 'perf record' command properly terminated?\n",
data->file.path);
}
if (f_header.attr_size == 0) {
pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
"Was the 'perf record' command properly terminated?\n",
data->file.path);
return -EINVAL;
}
nr_attrs = f_header.attrs.size / f_header.attr_size;
lseek(fd, f_header.attrs.offset, SEEK_SET);
for (i = 0; i < nr_attrs; i++) {
struct evsel *evsel;
off_t tmp;
if (read_attr(fd, header, &f_attr) < 0)
goto out_errno;
if (header->needs_swap) {
f_attr.ids.size = bswap_64(f_attr.ids.size);
f_attr.ids.offset = bswap_64(f_attr.ids.offset);
perf_event__attr_swap(&f_attr.attr);
}
tmp = lseek(fd, 0, SEEK_CUR);
evsel = evsel__new(&f_attr.attr);
if (evsel == NULL)
goto out_delete_evlist;
evsel->needs_swap = header->needs_swap;
/*
* Do it before so that if perf_evsel__alloc_id fails, this
* entry gets purged too at evlist__delete().
*/
evlist__add(session->evlist, evsel);
nr_ids = f_attr.ids.size / sizeof(u64);
/*
* We don't have the cpu and thread maps on the header, so
* for allocating the perf_sample_id table we fake 1 cpu and
* hattr->ids threads.
*/
if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
goto out_delete_evlist;
lseek(fd, f_attr.ids.offset, SEEK_SET);
for (j = 0; j < nr_ids; j++) {
if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
goto out_errno;
perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
}
lseek(fd, tmp, SEEK_SET);
}
#ifdef HAVE_LIBTRACEEVENT
perf_header__process_sections(header, fd, &session->tevent,
perf_file_section__process);
if (evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent))
goto out_delete_evlist;
#else
perf_header__process_sections(header, fd, NULL, perf_file_section__process);
#endif
return 0;
out_errno:
return -errno;
out_delete_evlist:
evlist__delete(session->evlist);
session->evlist = NULL;
return -ENOMEM;
}
int perf_event__process_feature(struct perf_session *session,
union perf_event *event)
{
struct perf_tool *tool = session->tool;
struct feat_fd ff = { .fd = 0 };
struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
int type = fe->header.type;
u64 feat = fe->feat_id;
int ret = 0;
if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
pr_warning("invalid record type %d in pipe-mode\n", type);
return 0;
}
if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
pr_warning("invalid record type %d in pipe-mode\n", type);
return -1;
}
if (!feat_ops[feat].process)
return 0;
ff.buf = (void *)fe->data;
ff.size = event->header.size - sizeof(*fe);
ff.ph = &session->header;
if (feat_ops[feat].process(&ff, NULL)) {
ret = -1;
goto out;
}
if (!feat_ops[feat].print || !tool->show_feat_hdr)
goto out;
if (!feat_ops[feat].full_only ||
tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
feat_ops[feat].print(&ff, stdout);
} else {
fprintf(stdout, "# %s info available, use -I to display\n",
feat_ops[feat].name);
}
out:
free_event_desc(ff.events);
return ret;
}
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
struct perf_record_event_update *ev = &event->event_update;
struct perf_cpu_map *map;
size_t ret;
ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id);
switch (ev->type) {
case PERF_EVENT_UPDATE__SCALE:
ret += fprintf(fp, "... scale: %f\n", ev->scale.scale);
break;
case PERF_EVENT_UPDATE__UNIT:
ret += fprintf(fp, "... unit: %s\n", ev->unit);
break;
case PERF_EVENT_UPDATE__NAME:
ret += fprintf(fp, "... name: %s\n", ev->name);
break;
case PERF_EVENT_UPDATE__CPUS:
ret += fprintf(fp, "... ");
map = cpu_map__new_data(&ev->cpus.cpus);
if (map)
ret += cpu_map__fprintf(map, fp);
else
ret += fprintf(fp, "failed to get cpus\n");
break;
default:
ret += fprintf(fp, "... unknown type\n");
break;
}
return ret;
}
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
u32 i, n_ids;
u64 *ids;
struct evsel *evsel;
struct evlist *evlist = *pevlist;
if (evlist == NULL) {
*pevlist = evlist = evlist__new();
if (evlist == NULL)
return -ENOMEM;
}
evsel = evsel__new(&event->attr.attr);
if (evsel == NULL)
return -ENOMEM;
evlist__add(evlist, evsel);
n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
n_ids = n_ids / sizeof(u64);
/*
* We don't have the cpu and thread maps on the header, so
* for allocating the perf_sample_id table we fake 1 cpu and
* hattr->ids threads.
*/
if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
return -ENOMEM;
ids = perf_record_header_attr_id(event);
for (i = 0; i < n_ids; i++) {
perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
}
return 0;
}
int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
struct perf_record_event_update *ev = &event->event_update;
struct evlist *evlist;
struct evsel *evsel;
struct perf_cpu_map *map;
if (dump_trace)
perf_event__fprintf_event_update(event, stdout);
if (!pevlist || *pevlist == NULL)
return -EINVAL;
evlist = *pevlist;
evsel = evlist__id2evsel(evlist, ev->id);
if (evsel == NULL)
return -EINVAL;
switch (ev->type) {
case PERF_EVENT_UPDATE__UNIT:
free((char *)evsel->unit);
evsel->unit = strdup(ev->unit);
break;
case PERF_EVENT_UPDATE__NAME:
free(evsel->name);
evsel->name = strdup(ev->name);
break;
case PERF_EVENT_UPDATE__SCALE:
evsel->scale = ev->scale.scale;
break;
case PERF_EVENT_UPDATE__CPUS:
map = cpu_map__new_data(&ev->cpus.cpus);
if (map) {
perf_cpu_map__put(evsel->core.own_cpus);
evsel->core.own_cpus = map;
} else
pr_err("failed to get event_update cpus\n");
		break;
	default:
break;
}
return 0;
}
#ifdef HAVE_LIBTRACEEVENT
int perf_event__process_tracing_data(struct perf_session *session,
union perf_event *event)
{
ssize_t size_read, padding, size = event->tracing_data.size;
int fd = perf_data__fd(session->data);
char buf[BUFSIZ];
/*
	 * The pipe fd is already in the proper place and in any case
	 * we can't move it, or we'd break the case where we read
	 * 'pipe' data from a regular file. The trace_report reads
* data from 'fd' so we need to set it directly behind the
* event, where the tracing data starts.
*/
if (!perf_data__is_pipe(session->data)) {
off_t offset = lseek(fd, 0, SEEK_CUR);
/* setup for reading amidst mmap */
lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
SEEK_SET);
}
size_read = trace_report(fd, &session->tevent,
session->repipe);
padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
if (readn(fd, buf, padding) < 0) {
pr_err("%s: reading input file", __func__);
return -1;
}
if (session->repipe) {
int retw = write(STDOUT_FILENO, buf, padding);
if (retw <= 0 || retw != padding) {
pr_err("%s: repiping tracing data padding", __func__);
return -1;
}
}
if (size_read + padding != size) {
pr_err("%s: tracing data size mismatch", __func__);
return -1;
}
evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent);
return size_read + padding;
}
#endif
int perf_event__process_build_id(struct perf_session *session,
union perf_event *event)
{
__event_process_build_id(&event->build_id,
event->build_id.filename,
session);
return 0;
}
| linux-master | tools/perf/util/header.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <stdlib.h>
#include <bpf/bpf.h>
#include <linux/err.h>
#include <internal/xyarray.h>
#include "util/debug.h"
#include "util/evsel.h"
#include "util/bpf-filter.h"
#include <util/bpf-filter-flex.h>
#include <util/bpf-filter-bison.h>
#include "bpf_skel/sample-filter.h"
#include "bpf_skel/sample_filter.skel.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define __PERF_SAMPLE_TYPE(st, opt) { st, #st, opt }
#define PERF_SAMPLE_TYPE(_st, opt) __PERF_SAMPLE_TYPE(PERF_SAMPLE_##_st, opt)
static const struct perf_sample_info {
u64 type;
const char *name;
const char *option;
} sample_table[] = {
/* default sample flags */
PERF_SAMPLE_TYPE(IP, NULL),
PERF_SAMPLE_TYPE(TID, NULL),
PERF_SAMPLE_TYPE(PERIOD, NULL),
/* flags mostly set by default, but still have options */
PERF_SAMPLE_TYPE(ID, "--sample-identifier"),
PERF_SAMPLE_TYPE(CPU, "--sample-cpu"),
PERF_SAMPLE_TYPE(TIME, "-T"),
/* optional sample flags */
PERF_SAMPLE_TYPE(ADDR, "-d"),
PERF_SAMPLE_TYPE(DATA_SRC, "-d"),
PERF_SAMPLE_TYPE(PHYS_ADDR, "--phys-data"),
PERF_SAMPLE_TYPE(WEIGHT, "-W"),
PERF_SAMPLE_TYPE(WEIGHT_STRUCT, "-W"),
PERF_SAMPLE_TYPE(TRANSACTION, "--transaction"),
PERF_SAMPLE_TYPE(CODE_PAGE_SIZE, "--code-page-size"),
PERF_SAMPLE_TYPE(DATA_PAGE_SIZE, "--data-page-size"),
};
static const struct perf_sample_info *get_sample_info(u64 flags)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(sample_table); i++) {
if (sample_table[i].type == flags)
return &sample_table[i];
}
return NULL;
}
static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *expr)
{
const struct perf_sample_info *info;
if (evsel->core.attr.sample_type & expr->sample_flags)
return 0;
if (expr->op == PBF_OP_GROUP_BEGIN) {
struct perf_bpf_filter_expr *group;
list_for_each_entry(group, &expr->groups, list) {
if (check_sample_flags(evsel, group) < 0)
return -1;
}
return 0;
}
info = get_sample_info(expr->sample_flags);
if (info == NULL) {
pr_err("Error: %s event does not have sample flags %lx\n",
evsel__name(evsel), expr->sample_flags);
return -1;
}
pr_err("Error: %s event does not have %s\n", evsel__name(evsel), info->name);
if (info->option)
pr_err(" Hint: please add %s option to perf record\n", info->option);
return -1;
}
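/*
 * Load the sample-filter BPF skeleton, flatten the parsed filter expressions
 * (including groups) into the 'filters' map, and attach the filter program
 * to every perf event fd of the evsel.
 */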
int perf_bpf_filter__prepare(struct evsel *evsel)
{
int i, x, y, fd;
struct sample_filter_bpf *skel;
struct bpf_program *prog;
struct bpf_link *link;
struct perf_bpf_filter_expr *expr;
skel = sample_filter_bpf__open_and_load();
if (!skel) {
pr_err("Failed to load perf sample-filter BPF skeleton\n");
return -1;
}
i = 0;
fd = bpf_map__fd(skel->maps.filters);
list_for_each_entry(expr, &evsel->bpf_filters, list) {
struct perf_bpf_filter_entry entry = {
.op = expr->op,
.part = expr->part,
.flags = expr->sample_flags,
.value = expr->val,
};
if (check_sample_flags(evsel, expr) < 0)
return -1;
bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
i++;
if (expr->op == PBF_OP_GROUP_BEGIN) {
struct perf_bpf_filter_expr *group;
list_for_each_entry(group, &expr->groups, list) {
struct perf_bpf_filter_entry group_entry = {
.op = group->op,
.part = group->part,
.flags = group->sample_flags,
.value = group->val,
};
bpf_map_update_elem(fd, &i, &group_entry, BPF_ANY);
i++;
}
memset(&entry, 0, sizeof(entry));
entry.op = PBF_OP_GROUP_END;
bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
i++;
}
}
if (i > MAX_FILTERS) {
pr_err("Too many filters: %d (max = %d)\n", i, MAX_FILTERS);
return -1;
}
prog = skel->progs.perf_sample_filter;
for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
link = bpf_program__attach_perf_event(prog, FD(evsel, x, y));
if (IS_ERR(link)) {
pr_err("Failed to attach perf sample-filter program\n");
return PTR_ERR(link);
}
}
}
evsel->bpf_skel = skel;
return 0;
}
int perf_bpf_filter__destroy(struct evsel *evsel)
{
struct perf_bpf_filter_expr *expr, *tmp;
list_for_each_entry_safe(expr, tmp, &evsel->bpf_filters, list) {
list_del(&expr->list);
free(expr);
}
sample_filter_bpf__destroy(evsel->bpf_skel);
return 0;
}
u64 perf_bpf_filter__lost_count(struct evsel *evsel)
{
struct sample_filter_bpf *skel = evsel->bpf_skel;
return skel ? skel->bss->dropped : 0;
}
struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flags, int part,
enum perf_bpf_filter_op op,
unsigned long val)
{
struct perf_bpf_filter_expr *expr;
expr = malloc(sizeof(*expr));
if (expr != NULL) {
expr->sample_flags = sample_flags;
expr->part = part;
expr->op = op;
expr->val = val;
INIT_LIST_HEAD(&expr->groups);
}
return expr;
}
int perf_bpf_filter__parse(struct list_head *expr_head, const char *str)
{
YY_BUFFER_STATE buffer;
int ret;
buffer = perf_bpf_filter__scan_string(str);
ret = perf_bpf_filter_parse(expr_head);
perf_bpf_filter__flush_buffer(buffer);
perf_bpf_filter__delete_buffer(buffer);
perf_bpf_filter_lex_destroy();
return ret;
}
| linux-master | tools/perf/util/bpf-filter.c |
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "util/debug.h"
#include "util/parse-sublevel-options.h"
static int parse_one_sublevel_option(const char *str,
struct sublevel_option *opts)
{
struct sublevel_option *opt = opts;
char *vstr, *s = strdup(str);
int v = 1;
if (!s) {
pr_err("no memory\n");
return -1;
}
vstr = strchr(s, '=');
if (vstr)
*vstr++ = 0;
while (opt->name) {
if (!strcmp(s, opt->name))
break;
opt++;
}
if (!opt->name) {
pr_err("Unknown option name '%s'\n", s);
free(s);
return -1;
}
if (vstr)
v = atoi(vstr);
*opt->value_ptr = v;
free(s);
return 0;
}
/* parse options like --foo a=<n>,b,c... */
int perf_parse_sublevel_options(const char *str, struct sublevel_option *opts)
{
char *s = strdup(str);
char *p = NULL;
int ret;
if (!s) {
pr_err("no memory\n");
return -1;
}
p = strtok(s, ",");
while (p) {
ret = parse_one_sublevel_option(p, opts);
if (ret) {
free(s);
return ret;
}
p = strtok(NULL, ",");
}
free(s);
return 0;
}
| linux-master | tools/perf/util/parse-sublevel-options.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2021 Google */
#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>
#include "affinity.h"
#include "bpf_counter.h"
#include "cgroup.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "bpf_skel/bperf_cgroup.skel.h"
static struct perf_event_attr cgrp_switch_attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CGROUP_SWITCHES,
.size = sizeof(cgrp_switch_attr),
.sample_period = 1,
.disabled = 1,
};
static struct evsel *cgrp_switch;
static struct bperf_cgroup_bpf *skel;
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))
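/*
 * Open and load the bperf cgroup skeleton, attach its program to a
 * cgroup-switches software event on every CPU, open one copy of each event
 * per CPU, and populate the maps that translate cgroup ids to event indexes.
 */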
static int bperf_load_program(struct evlist *evlist)
{
struct bpf_link *link;
struct evsel *evsel;
struct cgroup *cgrp, *leader_cgrp;
int i, j;
struct perf_cpu cpu;
int total_cpus = cpu__max_cpu().cpu;
int map_size, map_fd;
int prog_fd, err;
skel = bperf_cgroup_bpf__open();
if (!skel) {
pr_err("Failed to open cgroup skeleton\n");
return -1;
}
skel->rodata->num_cpus = total_cpus;
skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;
BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
/* we need one copy of events per cpu for reading */
map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
bpf_map__set_max_entries(skel->maps.events, map_size);
bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
/* previous result is saved in a per-cpu array */
map_size = evlist->core.nr_entries / nr_cgroups;
bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
/* cgroup result needs all events (per-cpu) */
map_size = evlist->core.nr_entries;
bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
set_max_rlimit();
err = bperf_cgroup_bpf__load(skel);
if (err) {
pr_err("Failed to load cgroup skeleton\n");
goto out;
}
if (cgroup_is_v2("perf_event") > 0)
skel->bss->use_cgroup_v2 = 1;
err = -1;
cgrp_switch = evsel__new(&cgrp_switch_attr);
if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
pr_err("Failed to open cgroup switches event\n");
goto out;
}
perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
FD(cgrp_switch, i));
if (IS_ERR(link)) {
pr_err("Failed to attach cgroup program\n");
err = PTR_ERR(link);
goto out;
}
}
/*
* Update cgrp_idx map from cgroup-id to event index.
*/
cgrp = NULL;
i = 0;
evlist__for_each_entry(evlist, evsel) {
if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
leader_cgrp = evsel->cgrp;
evsel->cgrp = NULL;
/* open single copy of the events w/o cgroup */
err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
if (err == 0)
evsel->supported = true;
map_fd = bpf_map__fd(skel->maps.events);
perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
int fd = FD(evsel, j);
__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY);
}
evsel->cgrp = leader_cgrp;
}
if (evsel->cgrp == cgrp)
continue;
cgrp = evsel->cgrp;
if (read_cgroup_id(cgrp) < 0) {
pr_err("Failed to get cgroup id\n");
err = -1;
goto out;
}
map_fd = bpf_map__fd(skel->maps.cgrp_idx);
err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
if (err < 0) {
pr_err("Failed to update cgroup index map\n");
goto out;
}
i++;
}
/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate readings. Check
	 * whether the kernel supports it.
*/
prog_fd = bpf_program__fd(skel->progs.trigger_read);
err = bperf_trigger_reading(prog_fd, 0);
if (err) {
pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
"Therefore, --for-each-cgroup might show inaccurate readings\n");
err = 0;
}
out:
return err;
}
static int bperf_cgrp__load(struct evsel *evsel,
struct target *target __maybe_unused)
{
static bool bperf_loaded = false;
evsel->bperf_leader_prog_fd = -1;
evsel->bperf_leader_link_fd = -1;
if (!bperf_loaded && bperf_load_program(evsel->evlist))
return -1;
bperf_loaded = true;
/* just to bypass bpf_counter_skip() */
evsel->follower_skel = (struct bperf_follower_bpf *)skel;
return 0;
}
static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
int cpu __maybe_unused, int fd __maybe_unused)
{
/* nothing to do */
return 0;
}
/*
 * Trigger the leader prog on each cpu, so the cgrp_readings map can get
 * the latest results.
*/
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
struct perf_cpu cpu;
int idx;
int prog_fd = bpf_program__fd(skel->progs.trigger_read);
perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
bperf_trigger_reading(prog_fd, cpu.cpu);
return 0;
}
static int bperf_cgrp__enable(struct evsel *evsel)
{
if (evsel->core.idx)
return 0;
bperf_cgrp__sync_counters(evsel->evlist);
skel->bss->enabled = 1;
return 0;
}
static int bperf_cgrp__disable(struct evsel *evsel)
{
if (evsel->core.idx)
return 0;
bperf_cgrp__sync_counters(evsel->evlist);
skel->bss->enabled = 0;
return 0;
}
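/*
 * Read the per-cgroup, per-cpu values accumulated by the BPF program from
 * the cgrp_readings map and copy them into each evsel's perf_counts.
 */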
static int bperf_cgrp__read(struct evsel *evsel)
{
struct evlist *evlist = evsel->evlist;
int total_cpus = cpu__max_cpu().cpu;
struct perf_counts_values *counts;
struct bpf_perf_event_value *values;
int reading_map_fd, err = 0;
if (evsel->core.idx)
return 0;
bperf_cgrp__sync_counters(evsel->evlist);
values = calloc(total_cpus, sizeof(*values));
if (values == NULL)
return -ENOMEM;
reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);
evlist__for_each_entry(evlist, evsel) {
__u32 idx = evsel->core.idx;
int i;
struct perf_cpu cpu;
err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
if (err) {
pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
idx, evsel__name(evsel), evsel->cgrp->name);
goto out;
}
perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
counts = perf_counts(evsel->counts, i, 0);
counts->val = values[cpu.cpu].counter;
counts->ena = values[cpu.cpu].enabled;
counts->run = values[cpu.cpu].running;
}
}
out:
free(values);
return err;
}
static int bperf_cgrp__destroy(struct evsel *evsel)
{
if (evsel->core.idx)
return 0;
bperf_cgroup_bpf__destroy(skel);
evsel__delete(cgrp_switch); // it'll destroy on_switch progs too
return 0;
}
struct bpf_counter_ops bperf_cgrp_ops = {
.load = bperf_cgrp__load,
.enable = bperf_cgrp__enable,
.disable = bperf_cgrp__disable,
.read = bperf_cgrp__read,
.install_pe = bperf_cgrp__install_pe,
.destroy = bperf_cgrp__destroy,
};
| linux-master | tools/perf/util/bpf_counter_cgroup.c |
// SPDX-License-Identifier: GPL-2.0
#include "print_binary.h"
#include <linux/log2.h>
#include <linux/ctype.h>
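/*
 * Hexdump-style printer: walk the buffer bytes_per_line (rounded up to a
 * power of two) bytes at a time and emit everything through the
 * caller-supplied printer callback, which decides how each element
 * (address, hex byte, character, padding) is rendered.
 */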
int binary__fprintf(unsigned char *data, size_t len,
size_t bytes_per_line, binary__fprintf_t printer,
void *extra, FILE *fp)
{
size_t i, j, mask;
int printed = 0;
if (!printer)
return 0;
bytes_per_line = roundup_pow_of_two(bytes_per_line);
mask = bytes_per_line - 1;
printed += printer(BINARY_PRINT_DATA_BEGIN, 0, extra, fp);
for (i = 0; i < len; i++) {
if ((i & mask) == 0) {
printed += printer(BINARY_PRINT_LINE_BEGIN, -1, extra, fp);
printed += printer(BINARY_PRINT_ADDR, i, extra, fp);
}
printed += printer(BINARY_PRINT_NUM_DATA, data[i], extra, fp);
if (((i & mask) == mask) || i == len - 1) {
for (j = 0; j < mask-(i & mask); j++)
printed += printer(BINARY_PRINT_NUM_PAD, -1, extra, fp);
printer(BINARY_PRINT_SEP, i, extra, fp);
for (j = i & ~mask; j <= i; j++)
printed += printer(BINARY_PRINT_CHAR_DATA, data[j], extra, fp);
for (j = 0; j < mask-(i & mask); j++)
printed += printer(BINARY_PRINT_CHAR_PAD, i, extra, fp);
printed += printer(BINARY_PRINT_LINE_END, -1, extra, fp);
}
}
printed += printer(BINARY_PRINT_DATA_END, -1, extra, fp);
return printed;
}
int is_printable_array(char *p, unsigned int len)
{
unsigned int i;
if (!p || !len || p[len - 1] != 0)
return 0;
len--;
for (i = 0; i < len && p[i]; i++) {
if (!isprint(p[i]) && !isspace(p[i]))
return 0;
}
return 1;
}
| linux-master | tools/perf/util/print_binary.c |
// SPDX-License-Identifier: GPL-2.0
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "evsel_config.h"
#include "parse-events.h"
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <api/fs/fs.h>
#include <subcmd/parse-options.h>
#include <perf/cpumap.h>
#include "cloexec.h"
#include "util/perf_api_probe.h"
#include "record.h"
#include "../perf-sys.h"
#include "topdown.h"
#include "map_symbol.h"
#include "mem-events.h"
/*
* evsel__config_leader_sampling() uses special rules for leader sampling.
* However, if the leader is an AUX area event, then assume the event to sample
* is the next event.
*/
static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
{
struct evsel *leader = evsel__leader(evsel);
if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader) ||
is_mem_loads_aux_event(leader)) {
evlist__for_each_entry(evlist, evsel) {
if (evsel__leader(evsel) == leader && evsel != evsel__leader(evsel))
return evsel;
}
}
return leader;
}
static u64 evsel__config_term_mask(struct evsel *evsel)
{
struct evsel_config_term *term;
struct list_head *config_terms = &evsel->config_terms;
u64 term_types = 0;
list_for_each_entry(term, config_terms, list) {
term_types |= 1 << term->type;
}
return term_types;
}
static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
{
struct perf_event_attr *attr = &evsel->core.attr;
struct evsel *leader = evsel__leader(evsel);
struct evsel *read_sampler;
u64 term_types, freq_mask;
if (!leader->sample_read)
return;
read_sampler = evsel__read_sampler(evsel, evlist);
if (evsel == read_sampler)
return;
term_types = evsel__config_term_mask(evsel);
/*
* Disable sampling for all group members except those with explicit
* config terms or the leader. In the case of an AUX area event, the 2nd
* event in the group is the one that 'leads' the sampling.
*/
freq_mask = (1 << EVSEL__CONFIG_TERM_FREQ) | (1 << EVSEL__CONFIG_TERM_PERIOD);
if ((term_types & freq_mask) == 0) {
attr->freq = 0;
attr->sample_freq = 0;
attr->sample_period = 0;
}
if ((term_types & (1 << EVSEL__CONFIG_TERM_OVERWRITE)) == 0)
attr->write_backward = 0;
/*
* We don't get a sample for slave events, we make them when delivering
* the group leader sample. Set the slave event to follow the master
* sample_type to ease up reporting.
* An AUX area event also has sample_type requirements, so also include
* the sample type bits from the leader's sample_type to cover that
* case.
*/
attr->sample_type = read_sampler->core.attr.sample_type |
leader->core.attr.sample_type;
}
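/*
 * Apply record options to every event in the evlist: per-event configuration,
 * leader-sampling rules, and a common sample id layout when needed.
 */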
void evlist__config(struct evlist *evlist, struct record_opts *opts, struct callchain_param *callchain)
{
struct evsel *evsel;
bool use_sample_identifier = false;
bool use_comm_exec;
bool sample_id = opts->sample_id;
if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)
opts->no_inherit = true;
use_comm_exec = perf_can_comm_exec();
evlist__for_each_entry(evlist, evsel) {
evsel__config(evsel, opts, callchain);
if (evsel->tracking && use_comm_exec)
evsel->core.attr.comm_exec = 1;
}
/* Configure leader sampling here now that the sample type is known */
evlist__for_each_entry(evlist, evsel)
evsel__config_leader_sampling(evsel, evlist);
if (opts->full_auxtrace || opts->sample_identifier) {
/*
* Need to be able to synthesize and parse selected events with
* arbitrary sample types, which requires always being able to
* match the id.
*/
use_sample_identifier = perf_can_sample_identifier();
sample_id = true;
} else if (evlist->core.nr_entries > 1) {
struct evsel *first = evlist__first(evlist);
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.sample_type == first->core.attr.sample_type)
continue;
use_sample_identifier = perf_can_sample_identifier();
break;
}
sample_id = true;
}
if (sample_id) {
evlist__for_each_entry(evlist, evsel)
evsel__set_sample_id(evsel, use_sample_identifier);
}
evlist__set_id_pos(evlist);
}
static int get_max_rate(unsigned int *rate)
{
return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
static int record_opts__config_freq(struct record_opts *opts)
{
bool user_freq = opts->user_freq != UINT_MAX;
bool user_interval = opts->user_interval != ULLONG_MAX;
unsigned int max_rate;
if (user_interval && user_freq) {
pr_err("cannot set frequency and period at the same time\n");
return -1;
}
if (user_interval)
opts->default_interval = opts->user_interval;
if (user_freq)
opts->freq = opts->user_freq;
/*
* User specified count overrides default frequency.
*/
if (opts->default_interval)
opts->freq = 0;
else if (opts->freq) {
opts->default_interval = opts->freq;
} else {
pr_err("frequency and count are zero, aborting\n");
return -1;
}
if (get_max_rate(&max_rate))
return 0;
/*
* User specified frequency is over current maximum.
*/
if (user_freq && (max_rate < opts->freq)) {
if (opts->strict_freq) {
pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
" Please use -F freq option with a lower value or consider\n"
" tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
max_rate);
return -1;
} else {
pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
" The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
" The kernel will lower it when perf's interrupts take too long.\n"
" Use --strict-freq to disable this throttling, refusing to record.\n",
max_rate, opts->freq, max_rate);
opts->freq = max_rate;
}
}
/*
* Default frequency is over current maximum.
*/
if (max_rate < opts->freq) {
pr_warning("Lowering default frequency rate from %u to %u.\n"
"Please consider tweaking "
"/proc/sys/kernel/perf_event_max_sample_rate.\n",
opts->freq, max_rate);
opts->freq = max_rate;
}
return 0;
}
int record_opts__config(struct record_opts *opts)
{
return record_opts__config_freq(opts);
}
bool evlist__can_select_event(struct evlist *evlist, const char *str)
{
struct evlist *temp_evlist;
struct evsel *evsel;
int err, fd;
struct perf_cpu cpu = { .cpu = 0 };
bool ret = false;
pid_t pid = -1;
temp_evlist = evlist__new();
if (!temp_evlist)
return false;
err = parse_event(temp_evlist, str);
if (err)
goto out_delete;
evsel = evlist__last(temp_evlist);
if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
if (cpus)
cpu = perf_cpu_map__cpu(cpus, 0);
perf_cpu_map__put(cpus);
} else {
cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);
}
while (1) {
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1,
perf_event_open_cloexec_flag());
if (fd < 0) {
if (pid == -1 && errno == EACCES) {
pid = 0;
continue;
}
goto out_delete;
}
break;
}
close(fd);
ret = true;
out_delete:
evlist__delete(temp_evlist);
return ret;
}
int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
{
unsigned int freq;
struct record_opts *opts = opt->value;
if (!str)
return -EINVAL;
if (strcasecmp(str, "max") == 0) {
if (get_max_rate(&freq)) {
pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
return -1;
}
pr_info("info: Using a maximum frequency rate of %'d Hz\n", freq);
} else {
freq = atoi(str);
}
opts->user_freq = freq;
return 0;
}
| linux-master | tools/perf/util/record.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "util/hashmap.h"
#include <linux/zalloc.h>
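/*
 * Welford's online algorithm: update the running count, mean, M2 (sum of
 * squared deltas, used for the variance), min and max.
 */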
void update_stats(struct stats *stats, u64 val)
{
double delta;
stats->n++;
delta = val - stats->mean;
stats->mean += delta / stats->n;
stats->M2 += delta*(val - stats->mean);
if (val > stats->max)
stats->max = val;
if (val < stats->min)
stats->min = val;
}
double avg_stats(struct stats *stats)
{
return stats->mean;
}
/*
* http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
*
* (\Sum n_i^2) - ((\Sum n_i)^2)/n
* s^2 = -------------------------------
* n - 1
*
* http://en.wikipedia.org/wiki/Stddev
*
* The std dev of the mean is related to the std dev by:
*
* s
* s_mean = -------
* sqrt(n)
*
*/
double stddev_stats(struct stats *stats)
{
double variance, variance_mean;
if (stats->n < 2)
return 0.0;
variance = stats->M2 / (stats->n - 1);
variance_mean = variance / stats->n;
return sqrt(variance_mean);
}
double rel_stddev_stats(double stddev, double avg)
{
double pct = 0.0;
if (avg)
pct = 100.0 * stddev/avg;
return pct;
}
static void evsel__reset_aggr_stats(struct evsel *evsel)
{
struct perf_stat_evsel *ps = evsel->stats;
struct perf_stat_aggr *aggr = ps->aggr;
if (aggr)
memset(aggr, 0, sizeof(*aggr) * ps->nr_aggr);
}
static void evsel__reset_stat_priv(struct evsel *evsel)
{
struct perf_stat_evsel *ps = evsel->stats;
init_stats(&ps->res_stats);
evsel__reset_aggr_stats(evsel);
}
static int evsel__alloc_aggr_stats(struct evsel *evsel, int nr_aggr)
{
struct perf_stat_evsel *ps = evsel->stats;
if (ps == NULL)
return 0;
ps->nr_aggr = nr_aggr;
ps->aggr = calloc(nr_aggr, sizeof(*ps->aggr));
if (ps->aggr == NULL)
return -ENOMEM;
return 0;
}
int evlist__alloc_aggr_stats(struct evlist *evlist, int nr_aggr)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel__alloc_aggr_stats(evsel, nr_aggr) < 0)
return -1;
}
return 0;
}
static int evsel__alloc_stat_priv(struct evsel *evsel, int nr_aggr)
{
struct perf_stat_evsel *ps;
ps = zalloc(sizeof(*ps));
if (ps == NULL)
return -ENOMEM;
evsel->stats = ps;
if (nr_aggr && evsel__alloc_aggr_stats(evsel, nr_aggr) < 0) {
evsel->stats = NULL;
free(ps);
return -ENOMEM;
}
evsel__reset_stat_priv(evsel);
return 0;
}
static void evsel__free_stat_priv(struct evsel *evsel)
{
struct perf_stat_evsel *ps = evsel->stats;
if (ps) {
zfree(&ps->aggr);
zfree(&ps->group_data);
}
zfree(&evsel->stats);
}
static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
{
int cpu_map_nr = evsel__nr_cpus(evsel);
int nthreads = perf_thread_map__nr(evsel->core.threads);
struct perf_counts *counts;
counts = perf_counts__new(cpu_map_nr, nthreads);
if (counts)
evsel->prev_raw_counts = counts;
return counts ? 0 : -ENOMEM;
}
static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
perf_counts__delete(evsel->prev_raw_counts);
evsel->prev_raw_counts = NULL;
}
static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
if (evsel->prev_raw_counts)
perf_counts__reset(evsel->prev_raw_counts);
}
static int evsel__alloc_stats(struct evsel *evsel, int nr_aggr, bool alloc_raw)
{
if (evsel__alloc_stat_priv(evsel, nr_aggr) < 0 ||
evsel__alloc_counts(evsel) < 0 ||
(alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
return -ENOMEM;
return 0;
}
int evlist__alloc_stats(struct perf_stat_config *config,
struct evlist *evlist, bool alloc_raw)
{
struct evsel *evsel;
int nr_aggr = 0;
if (config && config->aggr_map)
nr_aggr = config->aggr_map->nr;
evlist__for_each_entry(evlist, evsel) {
if (evsel__alloc_stats(evsel, nr_aggr, alloc_raw))
goto out_free;
}
return 0;
out_free:
evlist__free_stats(evlist);
return -1;
}
void evlist__free_stats(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
evsel__free_stat_priv(evsel);
evsel__free_counts(evsel);
evsel__free_prev_raw_counts(evsel);
}
}
void evlist__reset_stats(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
evsel__reset_stat_priv(evsel);
evsel__reset_counts(evsel);
}
}
void evlist__reset_aggr_stats(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel)
evsel__reset_aggr_stats(evsel);
}
void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel)
evsel__reset_prev_raw_counts(evsel);
}
static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
int idx, nthreads = perf_thread_map__nr(evsel->core.threads);
for (int thread = 0; thread < nthreads; thread++) {
perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
*perf_counts(evsel->counts, idx, thread) =
*perf_counts(evsel->prev_raw_counts, idx, thread);
}
}
}
void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel)
evsel__copy_prev_raw_counts(evsel);
}
static void evsel__copy_res_stats(struct evsel *evsel)
{
struct perf_stat_evsel *ps = evsel->stats;
/*
* For GLOBAL aggregation mode, it updates the counts for each run
* in the evsel->stats.res_stats. See perf_stat_process_counter().
*/
*ps->aggr[0].counts.values = avg_stats(&ps->res_stats);
}
void evlist__copy_res_stats(struct perf_stat_config *config, struct evlist *evlist)
{
struct evsel *evsel;
if (config->aggr_mode != AGGR_GLOBAL)
return;
evlist__for_each_entry(evlist, evsel)
evsel__copy_res_stats(evsel);
}
static size_t pkg_id_hash(long __key, void *ctx __maybe_unused)
{
uint64_t *key = (uint64_t *) __key;
return *key & 0xffffffff;
}
static bool pkg_id_equal(long __key1, long __key2, void *ctx __maybe_unused)
{
uint64_t *key1 = (uint64_t *) __key1;
uint64_t *key2 = (uint64_t *) __key2;
return *key1 == *key2;
}
static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
int cpu_map_idx, bool *skip)
{
struct hashmap *mask = counter->per_pkg_mask;
struct perf_cpu_map *cpus = evsel__cpus(counter);
struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
int s, d, ret = 0;
uint64_t *key;
*skip = false;
if (!counter->per_pkg)
return 0;
if (perf_cpu_map__empty(cpus))
return 0;
if (!mask) {
mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
if (IS_ERR(mask))
return -ENOMEM;
counter->per_pkg_mask = mask;
}
/*
	 * We do not consider an event that has not run as a good
	 * instance for marking a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second one is, and this function
	 * would mark the package as used after seeing the first CPU and
	 * would then not read the values from the second CPU.
*/
if (!(vals->run && vals->ena))
return 0;
s = cpu__get_socket_id(cpu);
if (s < 0)
return -1;
/*
	 * On a multi-die system, die_id > 0; on a system without dies, die_id = 0.
	 * We use a hashmap keyed on (socket, die) to track which socket+die
	 * pairs have already been counted.
*/
d = cpu__get_die_id(cpu);
if (d < 0)
return -1;
key = malloc(sizeof(*key));
if (!key)
return -ENOMEM;
*key = (uint64_t)d << 32 | s;
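	/*
	 * Illustrative example (values assumed, not from the original source):
	 * with socket id s = 1 and die id d = 2, the key becomes
	 * (2ULL << 32) | 1 == 0x0000000200000001. pkg_id_hash() above then
	 * hashes on the low 32 bits (the socket id), while pkg_id_equal()
	 * compares the full 64-bit (die, socket) pair.
	 */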
if (hashmap__find(mask, key, NULL)) {
*skip = true;
free(key);
} else
ret = hashmap__add(mask, key, 1);
return ret;
}
static bool evsel__count_has_error(struct evsel *evsel,
struct perf_counts_values *count,
struct perf_stat_config *config)
{
	/* the evsel has already failed */
if (evsel->err || evsel->counts->scaled == -1)
return true;
/* this is meaningful for CPU aggregation modes only */
if (config->aggr_mode == AGGR_GLOBAL)
return false;
/* it's considered ok when it actually ran */
if (count->ena != 0 && count->run != 0)
return false;
return true;
}
static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
int cpu_map_idx, int thread,
struct perf_counts_values *count)
{
struct perf_stat_evsel *ps = evsel->stats;
static struct perf_counts_values zero;
bool skip = false;
if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
pr_err("failed to read per-pkg counter\n");
return -1;
}
if (skip)
count = &zero;
if (!evsel->snapshot)
evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
perf_counts_values__scale(count, config->scale, NULL);
if (config->aggr_mode == AGGR_THREAD) {
struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;
/*
		 * Skip zero values when --per-thread is enabled globally,
		 * otherwise the output is flooded with zero counts.
*/
if (count->val == 0 && config->system_wide)
return 0;
ps->aggr[thread].nr++;
aggr_counts->val += count->val;
aggr_counts->ena += count->ena;
aggr_counts->run += count->run;
return 0;
}
if (ps->aggr) {
struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
struct perf_stat_aggr *ps_aggr;
int i;
for (i = 0; i < ps->nr_aggr; i++) {
if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
continue;
ps_aggr = &ps->aggr[i];
ps_aggr->nr++;
/*
			 * When any result is bad, mark them all bad to give
			 * consistent output in interval mode. But per-task
			 * counters can have 0 enabled time when some tasks
			 * are idle.
*/
if (evsel__count_has_error(evsel, count, config) && !ps_aggr->failed) {
ps_aggr->counts.val = 0;
ps_aggr->counts.ena = 0;
ps_aggr->counts.run = 0;
ps_aggr->failed = true;
}
if (!ps_aggr->failed) {
ps_aggr->counts.val += count->val;
ps_aggr->counts.ena += count->ena;
ps_aggr->counts.run += count->run;
}
break;
}
}
return 0;
}
static int process_counter_maps(struct perf_stat_config *config,
struct evsel *counter)
{
int nthreads = perf_thread_map__nr(counter->core.threads);
int ncpus = evsel__nr_cpus(counter);
int idx, thread;
for (thread = 0; thread < nthreads; thread++) {
for (idx = 0; idx < ncpus; idx++) {
if (process_counter_values(config, counter, idx, thread,
perf_counts(counter->counts, idx, thread)))
return -1;
}
}
return 0;
}
int perf_stat_process_counter(struct perf_stat_config *config,
struct evsel *counter)
{
struct perf_stat_evsel *ps = counter->stats;
u64 *count;
int ret;
if (counter->per_pkg)
evsel__zero_per_pkg(counter);
ret = process_counter_maps(config, counter);
if (ret)
return ret;
if (config->aggr_mode != AGGR_GLOBAL)
return 0;
/*
	 * GLOBAL aggregation mode has only a single set of aggregated counts,
	 * so we can use ps->aggr[0] as the actual output.
*/
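	/*
	 * Note (descriptive, based on the perf_counts_values union): counts.values
	 * aliases the val/ena/run fields, so count[0], count[1] and count[2]
	 * printed below are the aggregated value, enabled time and running time
	 * respectively.
	 */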
count = ps->aggr[0].counts.values;
update_stats(&ps->res_stats, *count);
if (verbose > 0) {
fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
evsel__name(counter), count[0], count[1], count[2]);
}
return 0;
}
static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
{
struct perf_stat_evsel *ps_a = evsel->stats;
struct perf_stat_evsel *ps_b = alias->stats;
int i;
if (ps_a->aggr == NULL && ps_b->aggr == NULL)
return 0;
if (ps_a->nr_aggr != ps_b->nr_aggr) {
pr_err("Unmatched aggregation mode between aliases\n");
return -1;
}
for (i = 0; i < ps_a->nr_aggr; i++) {
struct perf_counts_values *aggr_counts_a = &ps_a->aggr[i].counts;
struct perf_counts_values *aggr_counts_b = &ps_b->aggr[i].counts;
/* NB: don't increase aggr.nr for aliases */
aggr_counts_a->val += aggr_counts_b->val;
aggr_counts_a->ena += aggr_counts_b->ena;
aggr_counts_a->run += aggr_counts_b->run;
}
return 0;
}
/* events should have the same name, scale, unit and cgroup but be on different PMUs */
static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b)
{
if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b)))
return false;
if (evsel_a->scale != evsel_b->scale)
return false;
if (evsel_a->cgrp != evsel_b->cgrp)
return false;
if (strcmp(evsel_a->unit, evsel_b->unit))
return false;
if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b))
return false;
return !!strcmp(evsel_a->pmu_name, evsel_b->pmu_name);
}
static void evsel__merge_aliases(struct evsel *evsel)
{
struct evlist *evlist = evsel->evlist;
struct evsel *alias;
alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
/* Merge the same events on different PMUs. */
if (evsel__is_alias(evsel, alias)) {
evsel__merge_aggr_counters(evsel, alias);
alias->merged_stat = true;
}
}
}
static bool evsel__should_merge_hybrid(const struct evsel *evsel,
const struct perf_stat_config *config)
{
return config->hybrid_merge && evsel__is_hybrid(evsel);
}
static void evsel__merge_stats(struct evsel *evsel, struct perf_stat_config *config)
{
/* this evsel is already merged */
if (evsel->merged_stat)
return;
if (evsel->auto_merge_stats || evsel__should_merge_hybrid(evsel, config))
evsel__merge_aliases(evsel);
}
/* merge the same uncore and hybrid events if requested */
void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist)
{
struct evsel *evsel;
if (config->no_merge)
return;
evlist__for_each_entry(evlist, evsel)
evsel__merge_stats(evsel, config);
}
static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id *core_id)
{
struct perf_stat_evsel *ps = evsel->stats;
struct perf_counts_values counts = { 0, };
struct aggr_cpu_id id;
struct perf_cpu cpu;
int idx;
/* collect per-core counts */
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
struct perf_stat_aggr *aggr = &ps->aggr[idx];
id = aggr_cpu_id__core(cpu, NULL);
if (!aggr_cpu_id__equal(core_id, &id))
continue;
counts.val += aggr->counts.val;
counts.ena += aggr->counts.ena;
counts.run += aggr->counts.run;
}
/* update aggregated per-core counts for each CPU */
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
struct perf_stat_aggr *aggr = &ps->aggr[idx];
id = aggr_cpu_id__core(cpu, NULL);
if (!aggr_cpu_id__equal(core_id, &id))
continue;
aggr->counts.val = counts.val;
aggr->counts.ena = counts.ena;
aggr->counts.run = counts.run;
aggr->used = true;
}
}
/* we have an aggr_map for cpu, but want to aggregate the counters per-core */
static void evsel__process_percore(struct evsel *evsel)
{
struct perf_stat_evsel *ps = evsel->stats;
struct aggr_cpu_id core_id;
struct perf_cpu cpu;
int idx;
if (!evsel->percore)
return;
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
struct perf_stat_aggr *aggr = &ps->aggr[idx];
if (aggr->used)
continue;
core_id = aggr_cpu_id__core(cpu, NULL);
evsel__update_percore_stats(evsel, &core_id);
}
}
/* process cpu stats on per-core events */
void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist)
{
struct evsel *evsel;
if (config->aggr_mode != AGGR_NONE)
return;
evlist__for_each_entry(evlist, evsel)
evsel__process_percore(evsel);
}
int perf_event__process_stat_event(struct perf_session *session,
union perf_event *event)
{
struct perf_counts_values count, *ptr;
struct perf_record_stat *st = &event->stat;
struct evsel *counter;
int cpu_map_idx;
count.val = st->val;
count.ena = st->ena;
count.run = st->run;
counter = evlist__id2evsel(session->evlist, st->id);
if (!counter) {
pr_err("Failed to resolve counter for stat event.\n");
return -EINVAL;
}
cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
if (cpu_map_idx == -1) {
pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
return -EINVAL;
}
ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
if (ptr == NULL) {
pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
st->cpu, st->thread, evsel__name(counter));
return -EINVAL;
}
*ptr = count;
counter->supported = true;
return 0;
}
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
struct perf_record_stat *st = (struct perf_record_stat *)event;
size_t ret;
ret = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
st->id, st->cpu, st->thread);
ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
st->val, st->ena, st->run);
return ret;
}
size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
size_t ret;
ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");
return ret;
}
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
struct perf_stat_config sc = {};
size_t ret;
perf_event__read_stat_config(&sc, &event->stat_config);
ret = fprintf(fp, "\n");
ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
ret += fprintf(fp, "... scale %d\n", sc.scale);
ret += fprintf(fp, "... interval %u\n", sc.interval);
return ret;
}
int create_perf_stat_counter(struct evsel *evsel,
struct perf_stat_config *config,
struct target *target,
int cpu_map_idx)
{
struct perf_event_attr *attr = &evsel->core.attr;
struct evsel *leader = evsel__leader(evsel);
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
/*
	 * If the event is part of a non-trivial group, enable group reads
	 * (for the leader) and ID retrieval for all members.
*/
if (leader->core.nr_members > 1)
attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
/*
	 * Some events, such as tracepoints, get initialized with
	 * sample_(period/type) set. Clear them for counting.
*/
attr->sample_period = 0;
if (config->identifier)
attr->sample_type = PERF_SAMPLE_IDENTIFIER;
if (config->all_user) {
attr->exclude_kernel = 1;
attr->exclude_user = 0;
}
if (config->all_kernel) {
attr->exclude_kernel = 0;
attr->exclude_user = 1;
}
/*
	 * Disable all counters initially; they will be enabled either
	 * manually by us or by the kernel via enable_on_exec set later.
*/
if (evsel__is_group_leader(evsel)) {
attr->disabled = 1;
if (target__enable_on_exec(target))
attr->enable_on_exec = 1;
}
if (target__has_cpu(target) && !target__has_per_thread(target))
return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);
return evsel__open_per_thread(evsel, evsel->core.threads);
}
| linux-master | tools/perf/util/stat.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include "dump-insn.h"
/* Fallback code */
__weak
const char *dump_insn(struct perf_insn *x __maybe_unused,
u64 ip __maybe_unused, u8 *inbuf __maybe_unused,
int inlen __maybe_unused, int *lenp)
{
if (lenp)
*lenp = 0;
return "?";
}
__weak
int arch_is_branch(const unsigned char *buf __maybe_unused,
size_t len __maybe_unused,
int x86_64 __maybe_unused)
{
return 0;
}
| linux-master | tools/perf/util/dump-insn.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(C) 2015-2018 Linaro Limited.
*
* Author: Tor Jeremiassen <[email protected]>
* Author: Mathieu Poirier <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "dso.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "session.h"
#include "map_symbol.h"
#include "branch.h"
#include "symbol.h"
#include "tool.h"
#include "thread.h"
#include "thread-stack.h"
#include "tsc.h"
#include <tools/libc_compat.h>
#include "util/synthetic-events.h"
#include "util/util.h"
struct cs_etm_auxtrace {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
struct auxtrace_heap heap;
struct itrace_synth_opts synth_opts;
struct perf_session *session;
struct perf_tsc_conversion tc;
/*
	 * Timeless decoding has no timestamps in the trace, so overlapping
	 * mmap lookups are less accurate, but it produces smaller trace data.
	 * We use context IDs in the trace instead of matching timestamps with
	 * fork records, so timestamps aren't really needed in the general
	 * case. Overlapping mmaps happen in cases such as between a fork and
	 * an exec.
*/
bool timeless_decoding;
/*
* Per-thread ignores the trace channel ID and instead assumes that
* everything in a buffer comes from the same process regardless of
* which CPU it ran on. It also implies no context IDs so the TID is
* taken from the auxtrace buffer.
*/
bool per_thread_decoding;
bool snapshot_mode;
bool data_queued;
bool has_virtual_ts; /* Virtual/Kernel timestamps in the trace. */
int num_cpu;
u64 latest_kernel_timestamp;
u32 auxtrace_type;
u64 branches_sample_type;
u64 branches_id;
u64 instructions_sample_type;
u64 instructions_sample_period;
u64 instructions_id;
u64 **metadata;
unsigned int pmu_type;
enum cs_etm_pid_fmt pid_fmt;
};
struct cs_etm_traceid_queue {
u8 trace_chan_id;
u64 period_instructions;
size_t last_branch_pos;
union perf_event *event_buf;
struct thread *thread;
struct thread *prev_packet_thread;
ocsd_ex_level prev_packet_el;
ocsd_ex_level el;
struct branch_stack *last_branch;
struct branch_stack *last_branch_rb;
struct cs_etm_packet *prev_packet;
struct cs_etm_packet *packet;
struct cs_etm_packet_queue packet_queue;
};
struct cs_etm_queue {
struct cs_etm_auxtrace *etm;
struct cs_etm_decoder *decoder;
struct auxtrace_buffer *buffer;
unsigned int queue_nr;
u8 pending_timestamp_chan_id;
u64 offset;
const unsigned char *buf;
size_t buf_len, buf_used;
/* Conversion between traceID and index in traceid_queues array */
struct intlist *traceid_queues_list;
struct cs_etm_traceid_queue **traceid_queues;
};
/* RB tree for quick conversion between traceID and metadata pointers */
static struct intlist *traceid_list;
static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
pid_t tid);
static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300
/*
* A struct auxtrace_heap_item only has a queue_nr and a timestamp to
 * work with. One option is to modify the auxtrace_heap_XYZ() API; instead we
 * simply encode the etm queue number in the upper 16 bits and the trace
 * channel ID in the lower 16 bits.
*/
#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
(queue_nr << 16 | trace_chan_id)
#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
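/*
 * Example (illustrative values only): for queue_nr = 2 and trace_chan_id =
 * 0x10, TO_CS_QUEUE_NR() yields (2 << 16) | 0x10 = 0x20010. TO_QUEUE_NR() and
 * TO_TRACE_CHAN_ID() then recover 2 and 0x10 respectively from that value.
 */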
static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
{
etmidr &= ETMIDR_PTM_VERSION;
if (etmidr == ETMIDR_PTM_VERSION)
return CS_ETM_PROTO_PTM;
return CS_ETM_PROTO_ETMV3;
}
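/*
 * Illustrative example: an ETMIDR whose masked version field equals
 * ETMIDR_PTM_VERSION (0x300, i.e. bits [11:8] reading 0b0011) is reported as
 * CS_ETM_PROTO_PTM above; any other version value, e.g. 0b0010 for ETMv3.x,
 * falls back to CS_ETM_PROTO_ETMV3.
 */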
static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
{
struct int_node *inode;
u64 *metadata;
inode = intlist__find(traceid_list, trace_chan_id);
if (!inode)
return -EINVAL;
metadata = inode->priv;
*magic = metadata[CS_ETM_MAGIC];
return 0;
}
int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
{
struct int_node *inode;
u64 *metadata;
inode = intlist__find(traceid_list, trace_chan_id);
if (!inode)
return -EINVAL;
metadata = inode->priv;
*cpu = (int)metadata[CS_ETM_CPU];
return 0;
}
/*
* The returned PID format is presented as an enum:
*
* CS_ETM_PIDFMT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced.
* CS_ETM_PIDFMT_CTXTID2: CONTEXTIDR_EL2 is traced.
* CS_ETM_PIDFMT_NONE: No context IDs
*
* It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
* are enabled at the same time when the session runs on an EL2 kernel.
 * This means both CONTEXTIDR_EL1 and CONTEXTIDR_EL2 will be recorded in the
 * trace data; in that case the tool selectively uses CONTEXTIDR_EL2 as the
 * PID.
*
* The result is cached in etm->pid_fmt so this function only needs to be called
* when processing the aux info.
*/
static enum cs_etm_pid_fmt cs_etm__init_pid_fmt(u64 *metadata)
{
u64 val;
if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
val = metadata[CS_ETM_ETMCR];
/* CONTEXTIDR is traced */
if (val & BIT(ETM_OPT_CTXTID))
return CS_ETM_PIDFMT_CTXTID;
} else {
val = metadata[CS_ETMV4_TRCCONFIGR];
/* CONTEXTIDR_EL2 is traced */
if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
return CS_ETM_PIDFMT_CTXTID2;
/* CONTEXTIDR_EL1 is traced */
else if (val & BIT(ETM4_CFG_BIT_CTXTID))
return CS_ETM_PIDFMT_CTXTID;
}
return CS_ETM_PIDFMT_NONE;
}
enum cs_etm_pid_fmt cs_etm__get_pid_fmt(struct cs_etm_queue *etmq)
{
return etmq->etm->pid_fmt;
}
static int cs_etm__map_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
{
struct int_node *inode;
/* Get an RB node for this CPU */
inode = intlist__findnew(traceid_list, trace_chan_id);
/* Something went wrong, no need to continue */
if (!inode)
return -ENOMEM;
/*
	 * The node for that CPU should not already be taken;
	 * back out if it is.
*/
if (inode->priv)
return -EINVAL;
/* All good, associate the traceID with the metadata pointer */
inode->priv = cpu_metadata;
return 0;
}
static int cs_etm__metadata_get_trace_id(u8 *trace_chan_id, u64 *cpu_metadata)
{
u64 cs_etm_magic = cpu_metadata[CS_ETM_MAGIC];
switch (cs_etm_magic) {
case __perf_cs_etmv3_magic:
*trace_chan_id = (u8)(cpu_metadata[CS_ETM_ETMTRACEIDR] &
CORESIGHT_TRACE_ID_VAL_MASK);
break;
case __perf_cs_etmv4_magic:
case __perf_cs_ete_magic:
*trace_chan_id = (u8)(cpu_metadata[CS_ETMV4_TRCTRACEIDR] &
CORESIGHT_TRACE_ID_VAL_MASK);
break;
default:
return -EINVAL;
}
return 0;
}
/*
 * Update the metadata trace ID from the value found in the AUX_HW_INFO packet.
* This will also clear the CORESIGHT_TRACE_ID_UNUSED_FLAG flag if present.
*/
static int cs_etm__metadata_set_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
{
u64 cs_etm_magic = cpu_metadata[CS_ETM_MAGIC];
switch (cs_etm_magic) {
case __perf_cs_etmv3_magic:
cpu_metadata[CS_ETM_ETMTRACEIDR] = trace_chan_id;
break;
case __perf_cs_etmv4_magic:
case __perf_cs_ete_magic:
cpu_metadata[CS_ETMV4_TRCTRACEIDR] = trace_chan_id;
break;
default:
return -EINVAL;
}
return 0;
}
/*
 * Get the metadata for a specific CPU from the per-CPU metadata array.
*/
static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
{
int i;
u64 *metadata = NULL;
for (i = 0; i < etm->num_cpu; i++) {
if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) {
metadata = etm->metadata[i];
break;
}
}
return metadata;
}
/*
* Handle the PERF_RECORD_AUX_OUTPUT_HW_ID event.
*
* The payload associates the Trace ID and the CPU.
* The routine is tolerant of seeing multiple packets with the same association,
* but a CPU / Trace ID association changing during a session is an error.
*/
static int cs_etm__process_aux_output_hw_id(struct perf_session *session,
union perf_event *event)
{
struct cs_etm_auxtrace *etm;
struct perf_sample sample;
struct int_node *inode;
struct evsel *evsel;
u64 *cpu_data;
u64 hw_id;
int cpu, version, err;
u8 trace_chan_id, curr_chan_id;
/* extract and parse the HW ID */
hw_id = event->aux_output_hw_id.hw_id;
version = FIELD_GET(CS_AUX_HW_ID_VERSION_MASK, hw_id);
trace_chan_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
/* check that we can handle this version */
if (version > CS_AUX_HW_ID_CURR_VERSION)
return -EINVAL;
/* get access to the etm metadata */
etm = container_of(session->auxtrace, struct cs_etm_auxtrace, auxtrace);
if (!etm || !etm->metadata)
return -EINVAL;
/* parse the sample to get the CPU */
evsel = evlist__event2evsel(session->evlist, event);
if (!evsel)
return -EINVAL;
err = evsel__parse_sample(evsel, event, &sample);
if (err)
return err;
cpu = sample.cpu;
if (cpu == -1) {
/* no CPU in the sample - possibly recorded with an old version of perf */
pr_err("CS_ETM: no CPU AUX_OUTPUT_HW_ID sample. Use compatible perf to record.");
return -EINVAL;
}
/* See if the ID is mapped to a CPU, and it matches the current CPU */
inode = intlist__find(traceid_list, trace_chan_id);
if (inode) {
cpu_data = inode->priv;
if ((int)cpu_data[CS_ETM_CPU] != cpu) {
pr_err("CS_ETM: map mismatch between HW_ID packet CPU and Trace ID\n");
return -EINVAL;
}
/* check that the mapped ID matches */
err = cs_etm__metadata_get_trace_id(&curr_chan_id, cpu_data);
if (err)
return err;
if (curr_chan_id != trace_chan_id) {
pr_err("CS_ETM: mismatch between CPU trace ID and HW_ID packet ID\n");
return -EINVAL;
}
/* mapped and matched - return OK */
return 0;
}
cpu_data = get_cpu_data(etm, cpu);
if (cpu_data == NULL)
return err;
	/* not one we've seen before - let's map it */
err = cs_etm__map_trace_id(trace_chan_id, cpu_data);
if (err)
return err;
/*
	 * Since we are picking up the association from the packet, we need to
	 * plug the correct trace ID into the metadata for setting up decoders
	 * later.
*/
err = cs_etm__metadata_set_trace_id(trace_chan_id, cpu_data);
return err;
}
void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
u8 trace_chan_id)
{
/*
* When a timestamp packet is encountered the backend code
* is stopped so that the front end has time to process packets
* that were accumulated in the traceID queue. Since there can
* be more than one channel per cs_etm_queue, we need to specify
* what traceID queue needs servicing.
*/
etmq->pending_timestamp_chan_id = trace_chan_id;
}
static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
u8 *trace_chan_id)
{
struct cs_etm_packet_queue *packet_queue;
if (!etmq->pending_timestamp_chan_id)
return 0;
if (trace_chan_id)
*trace_chan_id = etmq->pending_timestamp_chan_id;
packet_queue = cs_etm__etmq_get_packet_queue(etmq,
etmq->pending_timestamp_chan_id);
if (!packet_queue)
return 0;
/* Acknowledge pending status */
etmq->pending_timestamp_chan_id = 0;
/* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
return packet_queue->cs_timestamp;
}
static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
{
int i;
queue->head = 0;
queue->tail = 0;
queue->packet_count = 0;
for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
queue->packet_buffer[i].instr_count = 0;
queue->packet_buffer[i].last_instr_taken_branch = false;
queue->packet_buffer[i].last_instr_size = 0;
queue->packet_buffer[i].last_instr_type = 0;
queue->packet_buffer[i].last_instr_subtype = 0;
queue->packet_buffer[i].last_instr_cond = 0;
queue->packet_buffer[i].flags = 0;
queue->packet_buffer[i].exception_number = UINT32_MAX;
queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
queue->packet_buffer[i].cpu = INT_MIN;
}
}
static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
{
int idx;
struct int_node *inode;
struct cs_etm_traceid_queue *tidq;
struct intlist *traceid_queues_list = etmq->traceid_queues_list;
intlist__for_each_entry(inode, traceid_queues_list) {
idx = (int)(intptr_t)inode->priv;
tidq = etmq->traceid_queues[idx];
cs_etm__clear_packet_queue(&tidq->packet_queue);
}
}
static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq,
u8 trace_chan_id)
{
int rc = -ENOMEM;
struct auxtrace_queue *queue;
struct cs_etm_auxtrace *etm = etmq->etm;
cs_etm__clear_packet_queue(&tidq->packet_queue);
queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
tidq->trace_chan_id = trace_chan_id;
tidq->el = tidq->prev_packet_el = ocsd_EL_unknown;
tidq->thread = machine__findnew_thread(&etm->session->machines.host, -1,
queue->tid);
tidq->prev_packet_thread = machine__idle_thread(&etm->session->machines.host);
tidq->packet = zalloc(sizeof(struct cs_etm_packet));
if (!tidq->packet)
goto out;
tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
if (!tidq->prev_packet)
goto out_free;
if (etm->synth_opts.last_branch) {
size_t sz = sizeof(struct branch_stack);
sz += etm->synth_opts.last_branch_sz *
sizeof(struct branch_entry);
tidq->last_branch = zalloc(sz);
if (!tidq->last_branch)
goto out_free;
tidq->last_branch_rb = zalloc(sz);
if (!tidq->last_branch_rb)
goto out_free;
}
tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
if (!tidq->event_buf)
goto out_free;
return 0;
out_free:
zfree(&tidq->last_branch_rb);
zfree(&tidq->last_branch);
zfree(&tidq->prev_packet);
zfree(&tidq->packet);
out:
return rc;
}
static struct cs_etm_traceid_queue
*cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
int idx;
struct int_node *inode;
struct intlist *traceid_queues_list;
struct cs_etm_traceid_queue *tidq, **traceid_queues;
struct cs_etm_auxtrace *etm = etmq->etm;
if (etm->per_thread_decoding)
trace_chan_id = CS_ETM_PER_THREAD_TRACEID;
traceid_queues_list = etmq->traceid_queues_list;
/*
	 * Check if a traceid_queue already exists for this traceID by looking
	 * in the queue list.
*/
inode = intlist__find(traceid_queues_list, trace_chan_id);
if (inode) {
idx = (int)(intptr_t)inode->priv;
return etmq->traceid_queues[idx];
}
/* We couldn't find a traceid_queue for this traceID, allocate one */
tidq = malloc(sizeof(*tidq));
if (!tidq)
return NULL;
memset(tidq, 0, sizeof(*tidq));
/* Get a valid index for the new traceid_queue */
idx = intlist__nr_entries(traceid_queues_list);
	/* Memory for the inode is freed in cs_etm__free_traceid_queues() */
inode = intlist__findnew(traceid_queues_list, trace_chan_id);
if (!inode)
goto out_free;
/* Associate this traceID with this index */
inode->priv = (void *)(intptr_t)idx;
if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
goto out_free;
/* Grow the traceid_queues array by one unit */
traceid_queues = etmq->traceid_queues;
traceid_queues = reallocarray(traceid_queues,
idx + 1,
sizeof(*traceid_queues));
/*
* On failure reallocarray() returns NULL and the original block of
* memory is left untouched.
*/
if (!traceid_queues)
goto out_free;
traceid_queues[idx] = tidq;
etmq->traceid_queues = traceid_queues;
return etmq->traceid_queues[idx];
out_free:
/*
* Function intlist__remove() removes the inode from the list
	 * and deletes the memory associated with it.
*/
intlist__remove(traceid_queues_list, inode);
free(tidq);
return NULL;
}
struct cs_etm_packet_queue
*cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
struct cs_etm_traceid_queue *tidq;
tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
if (tidq)
return &tidq->packet_queue;
return NULL;
}
static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
struct cs_etm_traceid_queue *tidq)
{
struct cs_etm_packet *tmp;
if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
etm->synth_opts.instructions) {
/*
* Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
* the next incoming packet.
*
* Threads and exception levels are also tracked for both the
* previous and current packets. This is because the previous
* packet is used for the 'from' IP for branch samples, so the
* thread at that time must also be assigned to that sample.
* Across discontinuity packets the thread can change, so by
* tracking the thread for the previous packet the branch sample
* will have the correct info.
*/
tmp = tidq->packet;
tidq->packet = tidq->prev_packet;
tidq->prev_packet = tmp;
tidq->prev_packet_el = tidq->el;
thread__put(tidq->prev_packet_thread);
tidq->prev_packet_thread = thread__get(tidq->thread);
}
}
static void cs_etm__packet_dump(const char *pkt_string)
{
const char *color = PERF_COLOR_BLUE;
int len = strlen(pkt_string);
if (len && (pkt_string[len-1] == '\n'))
color_fprintf(stdout, color, " %s", pkt_string);
else
color_fprintf(stdout, color, " %s\n", pkt_string);
fflush(stdout);
}
static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
struct cs_etm_auxtrace *etm, int idx,
u32 etmidr)
{
u64 **metadata = etm->metadata;
t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
}
static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
struct cs_etm_auxtrace *etm, int idx)
{
u64 **metadata = etm->metadata;
t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
}
static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
struct cs_etm_auxtrace *etm, int idx)
{
u64 **metadata = etm->metadata;
t_params[idx].protocol = CS_ETM_PROTO_ETE;
t_params[idx].ete.reg_idr0 = metadata[idx][CS_ETE_TRCIDR0];
t_params[idx].ete.reg_idr1 = metadata[idx][CS_ETE_TRCIDR1];
t_params[idx].ete.reg_idr2 = metadata[idx][CS_ETE_TRCIDR2];
t_params[idx].ete.reg_idr8 = metadata[idx][CS_ETE_TRCIDR8];
t_params[idx].ete.reg_configr = metadata[idx][CS_ETE_TRCCONFIGR];
t_params[idx].ete.reg_traceidr = metadata[idx][CS_ETE_TRCTRACEIDR];
t_params[idx].ete.reg_devarch = metadata[idx][CS_ETE_TRCDEVARCH];
}
static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
struct cs_etm_auxtrace *etm,
int decoders)
{
int i;
u32 etmidr;
u64 architecture;
for (i = 0; i < decoders; i++) {
architecture = etm->metadata[i][CS_ETM_MAGIC];
switch (architecture) {
case __perf_cs_etmv3_magic:
etmidr = etm->metadata[i][CS_ETM_ETMIDR];
cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
break;
case __perf_cs_etmv4_magic:
cs_etm__set_trace_param_etmv4(t_params, etm, i);
break;
case __perf_cs_ete_magic:
cs_etm__set_trace_param_ete(t_params, etm, i);
break;
default:
return -EINVAL;
}
}
return 0;
}
static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
struct cs_etm_queue *etmq,
enum cs_etm_decoder_operation mode,
bool formatted)
{
int ret = -EINVAL;
if (!(mode < CS_ETM_OPERATION_MAX))
goto out;
d_params->packet_printer = cs_etm__packet_dump;
d_params->operation = mode;
d_params->data = etmq;
d_params->formatted = formatted;
d_params->fsyncs = false;
d_params->hsyncs = false;
d_params->frame_aligned = true;
ret = 0;
out:
return ret;
}
static void cs_etm__dump_event(struct cs_etm_queue *etmq,
struct auxtrace_buffer *buffer)
{
int ret;
const char *color = PERF_COLOR_BLUE;
size_t buffer_used = 0;
fprintf(stdout, "\n");
color_fprintf(stdout, color,
". ... CoreSight %s Trace data: size %#zx bytes\n",
cs_etm_decoder__get_name(etmq->decoder), buffer->size);
do {
size_t consumed;
ret = cs_etm_decoder__process_data_block(
etmq->decoder, buffer->offset,
&((u8 *)buffer->data)[buffer_used],
buffer->size - buffer_used, &consumed);
if (ret)
break;
buffer_used += consumed;
} while (buffer_used < buffer->size);
cs_etm_decoder__reset(etmq->decoder);
}
static int cs_etm__flush_events(struct perf_session *session,
struct perf_tool *tool)
{
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
if (dump_trace)
return 0;
if (!tool->ordered_events)
return -EINVAL;
if (etm->timeless_decoding) {
/*
* Pass tid = -1 to process all queues. But likely they will have
* already been processed on PERF_RECORD_EXIT anyway.
*/
return cs_etm__process_timeless_queues(etm, -1);
}
return cs_etm__process_timestamped_queues(etm);
}
static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
{
int idx;
uintptr_t priv;
struct int_node *inode, *tmp;
struct cs_etm_traceid_queue *tidq;
struct intlist *traceid_queues_list = etmq->traceid_queues_list;
intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
priv = (uintptr_t)inode->priv;
idx = priv;
/* Free this traceid_queue from the array */
tidq = etmq->traceid_queues[idx];
thread__zput(tidq->thread);
thread__zput(tidq->prev_packet_thread);
zfree(&tidq->event_buf);
zfree(&tidq->last_branch);
zfree(&tidq->last_branch_rb);
zfree(&tidq->prev_packet);
zfree(&tidq->packet);
zfree(&tidq);
/*
* Function intlist__remove() removes the inode from the list
	 * and deletes the memory associated with it.
*/
intlist__remove(traceid_queues_list, inode);
}
/* Then the RB tree itself */
intlist__delete(traceid_queues_list);
etmq->traceid_queues_list = NULL;
/* finally free the traceid_queues array */
zfree(&etmq->traceid_queues);
}
static void cs_etm__free_queue(void *priv)
{
struct cs_etm_queue *etmq = priv;
if (!etmq)
return;
cs_etm_decoder__free(etmq->decoder);
cs_etm__free_traceid_queues(etmq);
free(etmq);
}
static void cs_etm__free_events(struct perf_session *session)
{
unsigned int i;
struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
struct auxtrace_queues *queues = &aux->queues;
for (i = 0; i < queues->nr_queues; i++) {
cs_etm__free_queue(queues->queue_array[i].priv);
queues->queue_array[i].priv = NULL;
}
auxtrace_queues__free(queues);
}
static void cs_etm__free(struct perf_session *session)
{
int i;
struct int_node *inode, *tmp;
struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
cs_etm__free_events(session);
session->auxtrace = NULL;
/* First remove all traceID/metadata nodes for the RB tree */
intlist__for_each_entry_safe(inode, tmp, traceid_list)
intlist__remove(traceid_list, inode);
/* Then the RB tree itself */
intlist__delete(traceid_list);
for (i = 0; i < aux->num_cpu; i++)
zfree(&aux->metadata[i]);
zfree(&aux->metadata);
zfree(&aux);
}
static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
struct evsel *evsel)
{
struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
return evsel->core.attr.type == aux->pmu_type;
}
static struct machine *cs_etm__get_machine(struct cs_etm_queue *etmq,
ocsd_ex_level el)
{
enum cs_etm_pid_fmt pid_fmt = cs_etm__get_pid_fmt(etmq);
/*
	 * For any virtualisation based on nVHE (e.g. pKVM), or for host kernels
	 * running at EL1, assume everything is the host.
*/
if (pid_fmt == CS_ETM_PIDFMT_CTXTID)
return &etmq->etm->session->machines.host;
/*
* Not perfect, but otherwise assume anything in EL1 is the default
* guest, and everything else is the host. Distinguishing between guest
* and host userspaces isn't currently supported either. Neither is
	 * multiple guest support. All this does is reduce the likelihood of
	 * decode errors where we look in the host kernel maps when we should
	 * have looked in the guest maps.
*/
switch (el) {
case ocsd_EL1:
return machines__find_guest(&etmq->etm->session->machines,
DEFAULT_GUEST_KERNEL_ID);
case ocsd_EL3:
case ocsd_EL2:
case ocsd_EL0:
case ocsd_EL_unknown:
default:
return &etmq->etm->session->machines.host;
}
}
static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address,
ocsd_ex_level el)
{
struct machine *machine = cs_etm__get_machine(etmq, el);
if (address >= machine__kernel_start(machine)) {
if (machine__is_host(machine))
return PERF_RECORD_MISC_KERNEL;
else
return PERF_RECORD_MISC_GUEST_KERNEL;
} else {
if (machine__is_host(machine))
return PERF_RECORD_MISC_USER;
else {
/*
* Can't really happen at the moment because
* cs_etm__get_machine() will always return
* machines.host for any non EL1 trace.
*/
return PERF_RECORD_MISC_GUEST_USER;
}
}
}
static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
u64 address, size_t size, u8 *buffer,
const ocsd_mem_space_acc_t mem_space)
{
u8 cpumode;
u64 offset;
int len;
struct addr_location al;
struct dso *dso;
struct cs_etm_traceid_queue *tidq;
int ret = 0;
if (!etmq)
return 0;
addr_location__init(&al);
tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
if (!tidq)
goto out;
/*
	 * We've already tracked the EL alongside the PID in cs_etm__set_thread(),
	 * so double check that it matches what OpenCSD thinks as well. OpenCSD
	 * doesn't distinguish between EL0 and EL1 for this mem access callback,
	 * so we had to do the extra tracking. Skip validation if it's any of
	 * the 'any' values.
*/
if (!(mem_space == OCSD_MEM_SPACE_ANY ||
mem_space == OCSD_MEM_SPACE_N || mem_space == OCSD_MEM_SPACE_S)) {
if (mem_space & OCSD_MEM_SPACE_EL1N) {
/* Includes both non secure EL1 and EL0 */
assert(tidq->el == ocsd_EL1 || tidq->el == ocsd_EL0);
} else if (mem_space & OCSD_MEM_SPACE_EL2)
assert(tidq->el == ocsd_EL2);
else if (mem_space & OCSD_MEM_SPACE_EL3)
assert(tidq->el == ocsd_EL3);
}
cpumode = cs_etm__cpu_mode(etmq, address, tidq->el);
if (!thread__find_map(tidq->thread, cpumode, address, &al))
goto out;
dso = map__dso(al.map);
if (!dso)
goto out;
if (dso->data.status == DSO_DATA_STATUS_ERROR &&
dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE))
goto out;
offset = map__map_ip(al.map, address);
map__load(al.map);
len = dso__data_read_offset(dso, maps__machine(thread__maps(tidq->thread)),
offset, buffer, size);
if (len <= 0) {
ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
" Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
if (!dso->auxtrace_warned) {
pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
address,
dso->long_name ? dso->long_name : "Unknown");
dso->auxtrace_warned = true;
}
goto out;
}
ret = len;
out:
addr_location__exit(&al);
return ret;
}
static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
bool formatted)
{
struct cs_etm_decoder_params d_params;
struct cs_etm_trace_params *t_params = NULL;
struct cs_etm_queue *etmq;
/*
* Each queue can only contain data from one CPU when unformatted, so only one decoder is
* needed.
*/
int decoders = formatted ? etm->num_cpu : 1;
etmq = zalloc(sizeof(*etmq));
if (!etmq)
return NULL;
etmq->traceid_queues_list = intlist__new(NULL);
if (!etmq->traceid_queues_list)
goto out_free;
/* Use metadata to fill in trace parameters for trace decoder */
t_params = zalloc(sizeof(*t_params) * decoders);
if (!t_params)
goto out_free;
if (cs_etm__init_trace_params(t_params, etm, decoders))
goto out_free;
/* Set decoder parameters to decode trace packets */
if (cs_etm__init_decoder_params(&d_params, etmq,
dump_trace ? CS_ETM_OPERATION_PRINT :
CS_ETM_OPERATION_DECODE,
formatted))
goto out_free;
etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
t_params);
if (!etmq->decoder)
goto out_free;
/*
* Register a function to handle all memory accesses required by
* the trace decoder library.
*/
if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
0x0L, ((u64) -1L),
cs_etm__mem_access))
goto out_free_decoder;
zfree(&t_params);
return etmq;
out_free_decoder:
cs_etm_decoder__free(etmq->decoder);
out_free:
intlist__delete(etmq->traceid_queues_list);
free(etmq);
return NULL;
}
static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
struct auxtrace_queue *queue,
unsigned int queue_nr,
bool formatted)
{
struct cs_etm_queue *etmq = queue->priv;
if (list_empty(&queue->head) || etmq)
return 0;
etmq = cs_etm__alloc_queue(etm, formatted);
if (!etmq)
return -ENOMEM;
queue->priv = etmq;
etmq->etm = etm;
etmq->queue_nr = queue_nr;
etmq->offset = 0;
return 0;
}
static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
struct cs_etm_queue *etmq,
unsigned int queue_nr)
{
int ret = 0;
unsigned int cs_queue_nr;
u8 trace_chan_id;
u64 cs_timestamp;
/*
* We are under a CPU-wide trace scenario. As such we need to know
* when the code that generated the traces started to execute so that
* it can be correlated with execution on other CPUs. So we get a
* handle on the beginning of traces and decode until we find a
* timestamp. The timestamp is then added to the auxtrace min heap
	 * in order to know which of the queues (across all etmqs) to decode first.
*/
while (1) {
/*
* Fetch an aux_buffer from this etmq. Bail if no more
* blocks or an error has been encountered.
*/
ret = cs_etm__get_data_block(etmq);
if (ret <= 0)
goto out;
/*
* Run decoder on the trace block. The decoder will stop when
* encountering a CS timestamp, a full packet queue or the end of
* trace for that block.
*/
ret = cs_etm__decode_data_block(etmq);
if (ret)
goto out;
/*
* Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
* the timestamp calculation for us.
*/
cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
/* We found a timestamp, no need to continue. */
if (cs_timestamp)
break;
/*
* We didn't find a timestamp so empty all the traceid packet
* queues before looking for another timestamp packet, either
* in the current data block or a new one. Packets that were
* just decoded are useless since no timestamp has been
* associated with them. As such simply discard them.
*/
cs_etm__clear_all_packet_queues(etmq);
}
/*
* We have a timestamp. Add it to the min heap to reflect when
* instructions conveyed by the range packets of this traceID queue
* started to execute. Once the same has been done for all the traceID
	 * queues of each etmq, rendering and decoding can start in
* chronological order.
*
* Note that packets decoded above are still in the traceID's packet
* queue and will be processed in cs_etm__process_timestamped_queues().
*/
cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
out:
return ret;
}
static inline
void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq)
{
struct branch_stack *bs_src = tidq->last_branch_rb;
struct branch_stack *bs_dst = tidq->last_branch;
size_t nr = 0;
/*
* Set the number of records before early exit: ->nr is used to
* determine how many branches to copy from ->entries.
*/
bs_dst->nr = bs_src->nr;
/*
* Early exit when there is nothing to copy.
*/
if (!bs_src->nr)
return;
/*
* As bs_src->entries is a circular buffer, we need to copy from it in
* two steps. First, copy the branches from the most recently inserted
* branch ->last_branch_pos until the end of bs_src->entries buffer.
*/
nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
memcpy(&bs_dst->entries[0],
&bs_src->entries[tidq->last_branch_pos],
sizeof(struct branch_entry) * nr);
/*
* If we wrapped around at least once, the branches from the beginning
* of the bs_src->entries buffer and until the ->last_branch_pos element
* are older valid branches: copy them over. The total number of
* branches copied over will be equal to the number of branches asked by
* the user in last_branch_sz.
*/
if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
memcpy(&bs_dst->entries[nr],
&bs_src->entries[0],
sizeof(struct branch_entry) * tidq->last_branch_pos);
}
}
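/*
 * Illustrative example of the two-step copy above (assumed values): with
 * last_branch_sz = 4, last_branch_pos = 1 and bs_src->nr >= 4, the first
 * memcpy() copies entries[1..3] (the three most recent branches, newest
 * first) and the second copies entries[0] (the oldest), so bs_dst ends up
 * holding all four branches in reverse chronological order.
 */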
static inline
void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
{
tidq->last_branch_pos = 0;
tidq->last_branch_rb->nr = 0;
}
static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
u8 trace_chan_id, u64 addr)
{
u8 instrBytes[2];
cs_etm__mem_access(etmq, trace_chan_id, addr, ARRAY_SIZE(instrBytes),
instrBytes, 0);
/*
* T32 instruction size is indicated by bits[15:11] of the first
* 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
* denote a 32-bit instruction.
*/
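	/*
	 * Illustrative example, assuming little-endian instruction memory as
	 * the check below does: the halfword 0xF000 is read back as bytes
	 * {0x00, 0xF0}, so (instrBytes[1] & 0xF8) == 0xF0 >= 0xE8 and 4 is
	 * returned; the halfword 0x4770 ("bx lr") gives
	 * (instrBytes[1] & 0xF8) == 0x40, so 2 is returned.
	 */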
return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}
static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
if (packet->sample_type == CS_ETM_DISCONTINUITY)
return 0;
return packet->start_addr;
}
static inline
u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
{
/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
if (packet->sample_type == CS_ETM_DISCONTINUITY)
return 0;
return packet->end_addr - packet->last_instr_size;
}
static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
u64 trace_chan_id,
const struct cs_etm_packet *packet,
u64 offset)
{
if (packet->isa == CS_ETM_ISA_T32) {
u64 addr = packet->start_addr;
while (offset) {
addr += cs_etm__t32_instr_size(etmq,
trace_chan_id, addr);
offset--;
}
return addr;
}
/* Assume a 4 byte instruction size (A32/A64) */
return packet->start_addr + offset * 4;
}
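/*
 * Illustrative example (addresses assumed): for an A32/A64 range packet
 * starting at 0x400500, an instruction offset of 3 resolves to
 * 0x400500 + 3 * 4 = 0x40050c. For T32 the walk above instead adds 2 or 4
 * bytes per instruction as reported by cs_etm__t32_instr_size().
 */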
static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq)
{
struct branch_stack *bs = tidq->last_branch_rb;
struct branch_entry *be;
/*
* The branches are recorded in a circular buffer in reverse
* chronological order: we start recording from the last element of the
* buffer down. After writing the first element of the stack, move the
* insert position back to the end of the buffer.
*/
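	/*
	 * Worked example (assumed size): with last_branch_sz = 4, successive
	 * branches are written at indices 3, 2, 1, 0, then 3 again, and so on,
	 * so the entry at the current last_branch_pos is always the most
	 * recent one.
	 */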
if (!tidq->last_branch_pos)
tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
tidq->last_branch_pos -= 1;
be = &bs->entries[tidq->last_branch_pos];
be->from = cs_etm__last_executed_instr(tidq->prev_packet);
be->to = cs_etm__first_executed_instr(tidq->packet);
/* No support for mispredict */
be->flags.mispred = 0;
be->flags.predicted = 1;
/*
* Increment bs->nr until reaching the number of last branches asked by
* the user on the command line.
*/
if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
bs->nr += 1;
}
static int cs_etm__inject_event(union perf_event *event,
struct perf_sample *sample, u64 type)
{
event->header.size = perf_event__sample_event_size(sample, type, 0);
return perf_event__synthesize_sample(event, type, 0, sample);
}
static int
cs_etm__get_trace(struct cs_etm_queue *etmq)
{
struct auxtrace_buffer *aux_buffer = etmq->buffer;
struct auxtrace_buffer *old_buffer = aux_buffer;
struct auxtrace_queue *queue;
queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
/* If no more data, drop the previous auxtrace_buffer and return */
if (!aux_buffer) {
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
etmq->buf_len = 0;
return 0;
}
etmq->buffer = aux_buffer;
/* If the aux_buffer doesn't have data associated, try to load it */
if (!aux_buffer->data) {
/* get the file desc associated with the perf data file */
int fd = perf_data__fd(etmq->etm->session->data);
aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
if (!aux_buffer->data)
return -ENOMEM;
}
/* If valid, drop the previous buffer */
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
etmq->buf_used = 0;
etmq->buf_len = aux_buffer->size;
etmq->buf = aux_buffer->data;
return etmq->buf_len;
}
static void cs_etm__set_thread(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq, pid_t tid,
ocsd_ex_level el)
{
struct machine *machine = cs_etm__get_machine(etmq, el);
if (tid != -1) {
thread__zput(tidq->thread);
tidq->thread = machine__find_thread(machine, -1, tid);
}
/* Couldn't find a known thread */
if (!tidq->thread)
tidq->thread = machine__idle_thread(machine);
tidq->el = el;
}
int cs_etm__etmq_set_tid_el(struct cs_etm_queue *etmq, pid_t tid,
u8 trace_chan_id, ocsd_ex_level el)
{
struct cs_etm_traceid_queue *tidq;
tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
if (!tidq)
return -EINVAL;
cs_etm__set_thread(etmq, tidq, tid, el);
return 0;
}
bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
{
return !!etmq->etm->timeless_decoding;
}
static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
u64 trace_chan_id,
const struct cs_etm_packet *packet,
struct perf_sample *sample)
{
/*
* It's pointless to read instructions for the CS_ETM_DISCONTINUITY
* packet, so directly bail out with 'insn_len' = 0.
*/
if (packet->sample_type == CS_ETM_DISCONTINUITY) {
sample->insn_len = 0;
return;
}
/*
	 * A T32 instruction might be 32-bit or 16-bit; decide by calling
	 * cs_etm__t32_instr_size().
*/
if (packet->isa == CS_ETM_ISA_T32)
sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
sample->ip);
/* Otherwise, A64 and A32 instruction size are always 32-bit. */
else
sample->insn_len = 4;
cs_etm__mem_access(etmq, trace_chan_id, sample->ip, sample->insn_len,
(void *)sample->insn, 0);
}
u64 cs_etm__convert_sample_time(struct cs_etm_queue *etmq, u64 cs_timestamp)
{
struct cs_etm_auxtrace *etm = etmq->etm;
if (etm->has_virtual_ts)
return tsc_to_perf_time(cs_timestamp, &etm->tc);
else
return cs_timestamp;
}
static inline u64 cs_etm__resolve_sample_time(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq)
{
struct cs_etm_auxtrace *etm = etmq->etm;
struct cs_etm_packet_queue *packet_queue = &tidq->packet_queue;
if (!etm->timeless_decoding && etm->has_virtual_ts)
return packet_queue->cs_timestamp;
else
return etm->latest_kernel_timestamp;
}
static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq,
u64 addr, u64 period)
{
int ret = 0;
struct cs_etm_auxtrace *etm = etmq->etm;
union perf_event *event = tidq->event_buf;
struct perf_sample sample = {.ip = 0,};
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = cs_etm__cpu_mode(etmq, addr, tidq->el);
event->sample.header.size = sizeof(struct perf_event_header);
/* Set time field based on etm auxtrace config. */
sample.time = cs_etm__resolve_sample_time(etmq, tidq);
sample.ip = addr;
sample.pid = thread__pid(tidq->thread);
sample.tid = thread__tid(tidq->thread);
sample.id = etmq->etm->instructions_id;
sample.stream_id = etmq->etm->instructions_id;
sample.period = period;
sample.cpu = tidq->packet->cpu;
sample.flags = tidq->prev_packet->flags;
sample.cpumode = event->sample.header.misc;
cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);
if (etm->synth_opts.last_branch)
sample.branch_stack = tidq->last_branch;
if (etm->synth_opts.inject) {
ret = cs_etm__inject_event(event, &sample,
etm->instructions_sample_type);
if (ret)
return ret;
}
ret = perf_session__deliver_synth_event(etm->session, event, &sample);
if (ret)
pr_err(
"CS ETM Trace: failed to deliver instruction event, error %d\n",
ret);
return ret;
}
/*
 * A CS ETM range packet encodes the instruction range between a branch target
 * and the next taken branch. Generate a branch sample accordingly.
*/
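/*
 * Illustrative example (addresses assumed): if the previous range packet
 * covered 0x1000-0x1010 with a 4-byte last instruction, the branch sample's
 * 'from' address is 0x1010 - 4 = 0x100c (cs_etm__last_executed_instr()) and
 * its 'to' address is the start address of the current packet
 * (cs_etm__first_executed_instr()).
 */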
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq)
{
int ret = 0;
struct cs_etm_auxtrace *etm = etmq->etm;
struct perf_sample sample = {.ip = 0,};
union perf_event *event = tidq->event_buf;
struct dummy_branch_stack {
u64 nr;
u64 hw_idx;
struct branch_entry entries;
} dummy_bs;
u64 ip;
ip = cs_etm__last_executed_instr(tidq->prev_packet);
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = cs_etm__cpu_mode(etmq, ip,
tidq->prev_packet_el);
event->sample.header.size = sizeof(struct perf_event_header);
/* Set time field based on etm auxtrace config. */
sample.time = cs_etm__resolve_sample_time(etmq, tidq);
sample.ip = ip;
sample.pid = thread__pid(tidq->prev_packet_thread);
sample.tid = thread__tid(tidq->prev_packet_thread);
sample.addr = cs_etm__first_executed_instr(tidq->packet);
sample.id = etmq->etm->branches_id;
sample.stream_id = etmq->etm->branches_id;
sample.period = 1;
sample.cpu = tidq->packet->cpu;
sample.flags = tidq->prev_packet->flags;
sample.cpumode = event->sample.header.misc;
cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
&sample);
/*
* perf report cannot handle events without a branch stack
*/
if (etm->synth_opts.last_branch) {
dummy_bs = (struct dummy_branch_stack){
.nr = 1,
.hw_idx = -1ULL,
.entries = {
.from = sample.ip,
.to = sample.addr,
},
};
sample.branch_stack = (struct branch_stack *)&dummy_bs;
}
if (etm->synth_opts.inject) {
ret = cs_etm__inject_event(event, &sample,
etm->branches_sample_type);
if (ret)
return ret;
}
ret = perf_session__deliver_synth_event(etm->session, event, &sample);
if (ret)
pr_err(
"CS ETM Trace: failed to deliver instruction event, error %d\n",
ret);
return ret;
}
struct cs_etm_synth {
struct perf_tool dummy_tool;
struct perf_session *session;
};
static int cs_etm__event_synth(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct cs_etm_synth *cs_etm_synth =
container_of(tool, struct cs_etm_synth, dummy_tool);
return perf_session__deliver_synth_event(cs_etm_synth->session,
event, NULL);
}
static int cs_etm__synth_event(struct perf_session *session,
struct perf_event_attr *attr, u64 id)
{
struct cs_etm_synth cs_etm_synth;
memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
cs_etm_synth.session = session;
return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
&id, cs_etm__event_synth);
}
static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
struct perf_session *session)
{
struct evlist *evlist = session->evlist;
struct evsel *evsel;
struct perf_event_attr attr;
bool found = false;
u64 id;
int err;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == etm->pmu_type) {
found = true;
break;
}
}
if (!found) {
pr_debug("No selected events with CoreSight Trace data\n");
return 0;
}
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.size = sizeof(struct perf_event_attr);
attr.type = PERF_TYPE_HARDWARE;
attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
PERF_SAMPLE_PERIOD;
if (etm->timeless_decoding)
attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
else
attr.sample_type |= PERF_SAMPLE_TIME;
attr.exclude_user = evsel->core.attr.exclude_user;
attr.exclude_kernel = evsel->core.attr.exclude_kernel;
attr.exclude_hv = evsel->core.attr.exclude_hv;
attr.exclude_host = evsel->core.attr.exclude_host;
attr.exclude_guest = evsel->core.attr.exclude_guest;
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
/* create new id val to be a fixed offset from evsel id */
id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
if (etm->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
attr.sample_period = 1;
attr.sample_type |= PERF_SAMPLE_ADDR;
err = cs_etm__synth_event(session, &attr, id);
if (err)
return err;
etm->branches_sample_type = attr.sample_type;
etm->branches_id = id;
id += 1;
attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
}
if (etm->synth_opts.last_branch) {
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
/*
* We don't use the hardware index, but the sample generation
* code uses the new format branch_stack with this field,
* so the event attributes must indicate that it's present.
*/
attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
}
if (etm->synth_opts.instructions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
attr.sample_period = etm->synth_opts.period;
etm->instructions_sample_period = attr.sample_period;
err = cs_etm__synth_event(session, &attr, id);
if (err)
return err;
etm->instructions_sample_type = attr.sample_type;
etm->instructions_id = id;
id += 1;
}
return 0;
}
static int cs_etm__sample(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq)
{
struct cs_etm_auxtrace *etm = etmq->etm;
int ret;
u8 trace_chan_id = tidq->trace_chan_id;
u64 instrs_prev;
/* Get instructions remainder from previous packet */
instrs_prev = tidq->period_instructions;
tidq->period_instructions += tidq->packet->instr_count;
/*
* Record a branch when the last instruction in
* PREV_PACKET is a branch.
*/
if (etm->synth_opts.last_branch &&
tidq->prev_packet->sample_type == CS_ETM_RANGE &&
tidq->prev_packet->last_instr_taken_branch)
cs_etm__update_last_branch_rb(etmq, tidq);
if (etm->synth_opts.instructions &&
tidq->period_instructions >= etm->instructions_sample_period) {
/*
* Emit instruction sample periodically
* TODO: allow period to be defined in cycles and clock time
*/
/*
* Below diagram demonstrates the instruction samples
* generation flows:
*
* Instrs Instrs Instrs Instrs
* Sample(n) Sample(n+1) Sample(n+2) Sample(n+3)
* | | | |
* V V V V
* --------------------------------------------------
* ^ ^
* | |
* Period Period
* instructions(Pi) instructions(Pi')
*
* | |
* \---------------- -----------------/
* V
* tidq->packet->instr_count
*
* Instrs Sample(n...) are the synthesised samples occurring
* every etm->instructions_sample_period instructions - as
* defined on the perf command line. Sample(n) is the
* last sample before the current etm packet, n+1 to n+3
* samples are generated from the current etm packet.
*
* tidq->packet->instr_count represents the number of
* instructions in the current etm packet.
*
* Period instructions (Pi) contains the number of
* instructions executed after the sample point(n) from the
* previous etm packet. This will always be less than
* etm->instructions_sample_period.
*
* When generating new samples, two parts of instructions are
* combined: the tail of the old packet and the head of the
* incoming packet, to generate sample(n+1); sample(n+2) and
* sample(n+3) consume instructions at the sample period. After
* sample(n+3), the remaining instructions will be used by a
* later packet and are assigned to tidq->period_instructions
* for the next round of calculation.
*/
/*
* Get the initial offset into the current packet instructions;
* entry conditions ensure that instrs_prev is less than
* etm->instructions_sample_period.
*/
u64 offset = etm->instructions_sample_period - instrs_prev;
u64 addr;
/* Prepare last branches for instruction sample */
if (etm->synth_opts.last_branch)
cs_etm__copy_last_branch_rb(etmq, tidq);
while (tidq->period_instructions >=
etm->instructions_sample_period) {
/*
* Calculate the address of the sampled instruction (-1
* as sample is reported as though instruction has just
* been executed, but PC has not advanced to next
* instruction)
*/
addr = cs_etm__instr_addr(etmq, trace_chan_id,
tidq->packet, offset - 1);
ret = cs_etm__synth_instruction_sample(
etmq, tidq, addr,
etm->instructions_sample_period);
if (ret)
return ret;
offset += etm->instructions_sample_period;
tidq->period_instructions -=
etm->instructions_sample_period;
}
}
if (etm->synth_opts.branches) {
bool generate_sample = false;
/* Generate sample for tracing on packet */
if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
generate_sample = true;
/* Generate sample for branch taken packet */
if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
tidq->prev_packet->last_instr_taken_branch)
generate_sample = true;
if (generate_sample) {
ret = cs_etm__synth_branch_sample(etmq, tidq);
if (ret)
return ret;
}
}
cs_etm__packet_swap(etm, tidq);
return 0;
}
static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
{
/*
* When the exception packet is inserted, regardless of whether the
* last instruction in the previous range packet is a taken branch
* or not, force 'prev_packet->last_instr_taken_branch' to true.
* This ensures a branch sample is generated for the instruction
* range before the exception is trapped to the kernel or returns.
*
* The exception packet includes the dummy address values, so don't
* swap PACKET with PREV_PACKET. This keeps PREV_PACKET to be useful
* for generating instruction and branch samples.
*/
if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
tidq->prev_packet->last_instr_taken_branch = true;
return 0;
}
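/*
 * Flush the traceID queue: emit any pending instruction and branch samples
 * for the previous packet, swap packets and reset the last branch buffer.
 */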
static int cs_etm__flush(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq)
{
int err = 0;
struct cs_etm_auxtrace *etm = etmq->etm;
/* Handle start tracing packet */
if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
goto swap_packet;
if (etmq->etm->synth_opts.last_branch &&
etmq->etm->synth_opts.instructions &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
u64 addr;
/* Prepare last branches for instruction sample */
cs_etm__copy_last_branch_rb(etmq, tidq);
/*
* Generate a last branch event for the branches left in the
* circular buffer at the end of the trace.
*
* Use the address of the end of the last reported execution
* range
*/
addr = cs_etm__last_executed_instr(tidq->prev_packet);
err = cs_etm__synth_instruction_sample(
etmq, tidq, addr,
tidq->period_instructions);
if (err)
return err;
tidq->period_instructions = 0;
}
if (etm->synth_opts.branches &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
err = cs_etm__synth_branch_sample(etmq, tidq);
if (err)
return err;
}
swap_packet:
cs_etm__packet_swap(etm, tidq);
/* Reset last branches after flush the trace */
if (etm->synth_opts.last_branch)
cs_etm__reset_last_branch_rb(tidq);
return err;
}
static int cs_etm__end_block(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq)
{
int err;
/*
* No new packet is coming and 'etmq->packet' contains the stale
* packet which was set during the previous packet swap; so skip
* generating a branch sample to avoid using the stale packet.
*
* For this case only flush branch stack and generate a last branch
* event for the branches left in the circular buffer at the end of
* the trace.
*/
if (etmq->etm->synth_opts.last_branch &&
etmq->etm->synth_opts.instructions &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
u64 addr;
/* Prepare last branches for instruction sample */
cs_etm__copy_last_branch_rb(etmq, tidq);
/*
* Use the address of the end of the last reported execution
* range.
*/
addr = cs_etm__last_executed_instr(tidq->prev_packet);
err = cs_etm__synth_instruction_sample(
etmq, tidq, addr,
tidq->period_instructions);
if (err)
return err;
tidq->period_instructions = 0;
}
return 0;
}
/*
* cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
* if need be.
* Returns: < 0 if error
* = 0 if no more auxtrace_buffer to read
* > 0 if the current buffer isn't empty yet
*/
static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
{
int ret;
if (!etmq->buf_len) {
ret = cs_etm__get_trace(etmq);
if (ret <= 0)
return ret;
/*
* We cannot assume consecutive blocks in the data file
* are contiguous, reset the decoder to force re-sync.
*/
ret = cs_etm_decoder__reset(etmq->decoder);
if (ret)
return ret;
}
return etmq->buf_len;
}
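/*
 * Read back the instruction that ends at @end_addr and check whether it is
 * an SVC (system call) instruction for the ISA in use (T32, A32 or A64).
 */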
static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
struct cs_etm_packet *packet,
u64 end_addr)
{
/* Initialise to keep compiler happy */
u16 instr16 = 0;
u32 instr32 = 0;
u64 addr;
switch (packet->isa) {
case CS_ETM_ISA_T32:
/*
* The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
*
* b'15 b'8
* +-----------------+--------+
* | 1 1 0 1 1 1 1 1 | imm8 |
* +-----------------+--------+
*
* According to the specification, SVC is only defined for T32 as
* a 16-bit instruction and has no 32-bit encoding; so only read
* 2 bytes as the instruction size for T32.
*/
addr = end_addr - 2;
cs_etm__mem_access(etmq, trace_chan_id, addr, sizeof(instr16),
(u8 *)&instr16, 0);
if ((instr16 & 0xFF00) == 0xDF00)
return true;
break;
case CS_ETM_ISA_A32:
/*
* The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
*
* b'31 b'28 b'27 b'24
* +---------+---------+-------------------------+
* | !1111 | 1 1 1 1 | imm24 |
* +---------+---------+-------------------------+
*/
addr = end_addr - 4;
cs_etm__mem_access(etmq, trace_chan_id, addr, sizeof(instr32),
(u8 *)&instr32, 0);
if ((instr32 & 0x0F000000) == 0x0F000000 &&
(instr32 & 0xF0000000) != 0xF0000000)
return true;
break;
case CS_ETM_ISA_A64:
/*
* The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
*
* b'31 b'21 b'4 b'0
* +-----------------------+---------+-----------+
* | 1 1 0 1 0 1 0 0 0 0 0 | imm16 | 0 0 0 0 1 |
* +-----------------------+---------+-----------+
*/
addr = end_addr - 4;
cs_etm__mem_access(etmq, trace_chan_id, addr, sizeof(instr32),
(u8 *)&instr32, 0);
if ((instr32 & 0xFFE0001F) == 0xd4000001)
return true;
break;
case CS_ETM_ISA_UNKNOWN:
default:
break;
}
return false;
}
static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq, u64 magic)
{
u8 trace_chan_id = tidq->trace_chan_id;
struct cs_etm_packet *packet = tidq->packet;
struct cs_etm_packet *prev_packet = tidq->prev_packet;
if (magic == __perf_cs_etmv3_magic)
if (packet->exception_number == CS_ETMV3_EXC_SVC)
return true;
/*
* ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
* HVC cases; need to check if it's SVC instruction based on
* packet address.
*/
if (magic == __perf_cs_etmv4_magic) {
if (packet->exception_number == CS_ETMV4_EXC_CALL &&
cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
prev_packet->end_addr))
return true;
}
return false;
}
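/*
 * Check whether the exception packet describes an asynchronous exception
 * (PE reset, debug halt, async data abort, IRQ, FIQ, ...).
 */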
static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
u64 magic)
{
struct cs_etm_packet *packet = tidq->packet;
if (magic == __perf_cs_etmv3_magic)
if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
packet->exception_number == CS_ETMV3_EXC_IRQ ||
packet->exception_number == CS_ETMV3_EXC_FIQ)
return true;
if (magic == __perf_cs_etmv4_magic)
if (packet->exception_number == CS_ETMV4_EXC_RESET ||
packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
packet->exception_number == CS_ETMV4_EXC_IRQ ||
packet->exception_number == CS_ETMV4_EXC_FIQ)
return true;
return false;
}
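/*
 * Check whether the exception packet describes a synchronous exception
 * (trap, alignment or fault errors, non-SVC calls, ...).
 */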
static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq,
u64 magic)
{
u8 trace_chan_id = tidq->trace_chan_id;
struct cs_etm_packet *packet = tidq->packet;
struct cs_etm_packet *prev_packet = tidq->prev_packet;
if (magic == __perf_cs_etmv3_magic)
if (packet->exception_number == CS_ETMV3_EXC_SMC ||
packet->exception_number == CS_ETMV3_EXC_HYP ||
packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
packet->exception_number == CS_ETMV3_EXC_GENERIC)
return true;
if (magic == __perf_cs_etmv4_magic) {
if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
return true;
/*
* For CS_ETMV4_EXC_CALL, except SVC other instructions
* (SMC, HVC) are taken as sync exceptions.
*/
if (packet->exception_number == CS_ETMV4_EXC_CALL &&
!cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
prev_packet->end_addr))
return true;
/*
* ETMv4 has 5 bits for exception number; if the numbers
* are in the range ( CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END ]
* they are implementation defined exceptions.
*
* For this case, simply take it as sync exception.
*/
if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
packet->exception_number <= CS_ETMV4_EXC_END)
return true;
}
return false;
}
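/*
 * Derive the perf branch flags (call, return, syscall, interrupt, trace
 * begin/end) for the current packet and, where required, fix up the flags
 * of the previous packet.
 */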
static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq)
{
struct cs_etm_packet *packet = tidq->packet;
struct cs_etm_packet *prev_packet = tidq->prev_packet;
u8 trace_chan_id = tidq->trace_chan_id;
u64 magic;
int ret;
switch (packet->sample_type) {
case CS_ETM_RANGE:
/*
* Immediate branch instruction with neither link nor
* return flag; it's a normal branch instruction within
* the function.
*/
if (packet->last_instr_type == OCSD_INSTR_BR &&
packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
packet->flags = PERF_IP_FLAG_BRANCH;
if (packet->last_instr_cond)
packet->flags |= PERF_IP_FLAG_CONDITIONAL;
}
/*
* Immediate branch instruction with link (e.g. BL), this is
* branch instruction for function call.
*/
if (packet->last_instr_type == OCSD_INSTR_BR &&
packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
packet->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_CALL;
/*
* Indirect branch instruction with link (e.g. BLR), this is
* branch instruction for function call.
*/
if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
packet->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_CALL;
/*
* Indirect branch instruction with subtype of
* OCSD_S_INSTR_V7_IMPLIED_RET, this is explicit hint for
* function return for A32/T32.
*/
if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
packet->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_RETURN;
/*
* Indirect branch instruction without link (e.g. BR), usually
* this is used for function return, especially for functions
* within dynamically linked libraries.
*/
if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
packet->last_instr_subtype == OCSD_S_INSTR_NONE)
packet->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_RETURN;
/* Return instruction for function return. */
if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
packet->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_RETURN;
/*
* The decoder might insert a discontinuity in the middle of
* instruction packets; fix up prev_packet with the flag
* PERF_IP_FLAG_TRACE_BEGIN to indicate that trace is restarting.
*/
if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
prev_packet->flags |= PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_TRACE_BEGIN;
/*
* If the previous packet is an exception return packet
* and the return address immediately follows an SVC
* instruction, calibrate the previous packet's sample flags
* to PERF_IP_FLAG_SYSCALLRET.
*/
if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_RETURN |
PERF_IP_FLAG_INTERRUPT) &&
cs_etm__is_svc_instr(etmq, trace_chan_id,
packet, packet->start_addr))
prev_packet->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_RETURN |
PERF_IP_FLAG_SYSCALLRET;
break;
case CS_ETM_DISCONTINUITY:
/*
* The trace is discontinuous; if the previous packet is an
* instruction range packet, set the flag PERF_IP_FLAG_TRACE_END
* for it.
*/
if (prev_packet->sample_type == CS_ETM_RANGE)
prev_packet->flags |= PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_TRACE_END;
break;
case CS_ETM_EXCEPTION:
ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
if (ret)
return ret;
/* The exception is for system call. */
if (cs_etm__is_syscall(etmq, tidq, magic))
packet->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_CALL |
PERF_IP_FLAG_SYSCALLRET;
/*
* The exceptions are triggered by external signals from bus,
* interrupt controller, debug module, PE reset or halt.
*/
else if (cs_etm__is_async_exception(tidq, magic))
packet->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_CALL |
PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT;
/*
* Otherwise, exception is caused by trap, instruction &
* data fault, or alignment errors.
*/
else if (cs_etm__is_sync_exception(etmq, tidq, magic))
packet->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_CALL |
PERF_IP_FLAG_INTERRUPT;
/*
* When the exception packet is inserted, since the exception
* packet is not used standalone for generating samples but is
* affiliated with the previous instruction range packet, set
* the previous range packet flags to tell perf it is an
* exception taken branch.
*/
if (prev_packet->sample_type == CS_ETM_RANGE)
prev_packet->flags = packet->flags;
break;
case CS_ETM_EXCEPTION_RET:
/*
* When the exception return packet is inserted, since the
* exception return packet is not used standalone for
* generating samples but is affiliated with the previous
* instruction range packet, set the previous range packet
* flags to tell perf it is an exception return branch.
*
* The exception return can be for either a system call or
* other exception types; unfortunately the packet doesn't
* contain exception type related info, so we cannot decide
* the exception type purely based on the exception return
* packet. Recording the exception number from the exception
* packet and reusing it for the exception return packet is
* not reliable because the trace can be discontinuous or the
* interrupt can be nested, so the recorded exception number
* cannot be used for the exception return packet in these
* two cases.
*
* For the exception return packet, we only need to
* distinguish whether the packet is for a system call or for
* other types. The decision can thus be deferred until the
* next packet, which contains the return address; based on
* the return address we can read out the previous instruction,
* check whether it is a system call instruction and then
* calibrate the sample flag as needed.
*/
if (prev_packet->sample_type == CS_ETM_RANGE)
prev_packet->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_RETURN |
PERF_IP_FLAG_INTERRUPT;
break;
case CS_ETM_EMPTY:
default:
break;
}
return 0;
}
static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
{
int ret = 0;
size_t processed = 0;
/*
* Packets are decoded and added to the decoder's packet queue
* until the decoder packet processing callback has requested that
* processing stops or there is nothing left in the buffer. Normal
* operations that stop processing are a timestamp packet or a full
* decoder buffer queue.
*/
ret = cs_etm_decoder__process_data_block(etmq->decoder,
etmq->offset,
&etmq->buf[etmq->buf_used],
etmq->buf_len,
&processed);
if (ret)
goto out;
etmq->offset += processed;
etmq->buf_used += processed;
etmq->buf_len -= processed;
out:
return ret;
}
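/*
 * Drain the packet queue of a traceID queue: set the sample flags and
 * synthesize samples for each packet until the queue is empty or an error
 * occurs.
 */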
static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq)
{
int ret;
struct cs_etm_packet_queue *packet_queue;
packet_queue = &tidq->packet_queue;
/* Process each packet in this chunk */
while (1) {
ret = cs_etm_decoder__get_packet(packet_queue,
tidq->packet);
if (ret <= 0)
/*
* Stop processing this chunk on
* end of data or error
*/
break;
/*
* Since packet addresses are swapped during packet
* handling within the switch() statement below,
* setting the sample flags must be done prior to the
* switch() statement so the address information is
* used before the packets are swapped.
*/
ret = cs_etm__set_sample_flags(etmq, tidq);
if (ret < 0)
break;
switch (tidq->packet->sample_type) {
case CS_ETM_RANGE:
/*
* If the packet contains an instruction
* range, generate instruction sequence
* events.
*/
cs_etm__sample(etmq, tidq);
break;
case CS_ETM_EXCEPTION:
case CS_ETM_EXCEPTION_RET:
/*
* If an exception packet arrives,
* make sure the previous instruction
* range packet is handled properly.
*/
cs_etm__exception(tidq);
break;
case CS_ETM_DISCONTINUITY:
/*
* Discontinuity in trace, flush
* previous branch stack
*/
cs_etm__flush(etmq, tidq);
break;
case CS_ETM_EMPTY:
/*
* We should not receive an empty packet;
* report an error.
*/
pr_err("CS ETM Trace: empty packet\n");
return -EINVAL;
default:
break;
}
}
return ret;
}
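/* Process and flush every traceID queue associated with this etmq. */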
static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
{
int idx;
struct int_node *inode;
struct cs_etm_traceid_queue *tidq;
struct intlist *traceid_queues_list = etmq->traceid_queues_list;
intlist__for_each_entry(inode, traceid_queues_list) {
idx = (int)(intptr_t)inode->priv;
tidq = etmq->traceid_queues[idx];
/* Ignore return value */
cs_etm__process_traceid_queue(etmq, tidq);
/*
* Generate an instruction sample with the remaining
* branchstack entries.
*/
cs_etm__flush(etmq, tidq);
}
}
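/*
 * Decode all buffers of a per-thread queue in order, ignoring timestamps,
 * and flush any remaining branch stack entries at the end of each block.
 */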
static int cs_etm__run_per_thread_timeless_decoder(struct cs_etm_queue *etmq)
{
int err = 0;
struct cs_etm_traceid_queue *tidq;
tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
if (!tidq)
return -EINVAL;
/* Go through each buffer in the queue and decode them one by one */
while (1) {
err = cs_etm__get_data_block(etmq);
if (err <= 0)
return err;
/* Run trace decoder until buffer consumed or end of trace */
do {
err = cs_etm__decode_data_block(etmq);
if (err)
return err;
/*
* Process each packet in this chunk, nothing to do if
* an error occurs other than hoping the next one will
* be better.
*/
err = cs_etm__process_traceid_queue(etmq, tidq);
} while (etmq->buf_len);
if (err == 0)
/* Flush any remaining branch stack entries */
err = cs_etm__end_block(etmq, tidq);
}
return err;
}
static int cs_etm__run_per_cpu_timeless_decoder(struct cs_etm_queue *etmq)
{
int idx, err = 0;
struct cs_etm_traceid_queue *tidq;
struct int_node *inode;
/* Go through each buffer in the queue and decode them one by one */
while (1) {
err = cs_etm__get_data_block(etmq);
if (err <= 0)
return err;
/* Run trace decoder until buffer consumed or end of trace */
do {
err = cs_etm__decode_data_block(etmq);
if (err)
return err;
/*
* cs_etm__run_per_thread_timeless_decoder() runs on a
* single traceID queue because each TID has a separate
* buffer. But here in per-cpu mode we need to iterate
* over each channel instead.
*/
intlist__for_each_entry(inode,
etmq->traceid_queues_list) {
idx = (int)(intptr_t)inode->priv;
tidq = etmq->traceid_queues[idx];
cs_etm__process_traceid_queue(etmq, tidq);
}
} while (etmq->buf_len);
intlist__for_each_entry(inode, etmq->traceid_queues_list) {
idx = (int)(intptr_t)inode->priv;
tidq = etmq->traceid_queues[idx];
/* Flush any remaining branch stack entries */
err = cs_etm__end_block(etmq, tidq);
if (err)
return err;
}
}
return err;
}
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
pid_t tid)
{
unsigned int i;
struct auxtrace_queues *queues = &etm->queues;
for (i = 0; i < queues->nr_queues; i++) {
struct auxtrace_queue *queue = &etm->queues.queue_array[i];
struct cs_etm_queue *etmq = queue->priv;
struct cs_etm_traceid_queue *tidq;
if (!etmq)
continue;
if (etm->per_thread_decoding) {
tidq = cs_etm__etmq_get_traceid_queue(
etmq, CS_ETM_PER_THREAD_TRACEID);
if (!tidq)
continue;
if (tid == -1 || thread__tid(tidq->thread) == tid)
cs_etm__run_per_thread_timeless_decoder(etmq);
} else
cs_etm__run_per_cpu_timeless_decoder(etmq);
}
return 0;
}
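/*
 * Decode all queues in timestamp order, using a min heap keyed on the
 * CoreSight timestamp of the next packets pending in each traceID queue.
 */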
static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm)
{
int ret = 0;
unsigned int cs_queue_nr, queue_nr, i;
u8 trace_chan_id;
u64 cs_timestamp;
struct auxtrace_queue *queue;
struct cs_etm_queue *etmq;
struct cs_etm_traceid_queue *tidq;
/*
* Pre-populate the heap with one entry from each queue so that we can
* start processing in time order across all queues.
*/
for (i = 0; i < etm->queues.nr_queues; i++) {
etmq = etm->queues.queue_array[i].priv;
if (!etmq)
continue;
ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
if (ret)
return ret;
}
while (1) {
if (!etm->heap.heap_cnt)
goto out;
/* Take the entry at the top of the min heap */
cs_queue_nr = etm->heap.heap_array[0].queue_nr;
queue_nr = TO_QUEUE_NR(cs_queue_nr);
trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
queue = &etm->queues.queue_array[queue_nr];
etmq = queue->priv;
/*
* Remove the top entry from the heap since we are about
* to process it.
*/
auxtrace_heap__pop(&etm->heap);
tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
if (!tidq) {
/*
* No traceID queue has been allocated for this traceID,
* which means something somewhere went very wrong. No
* other choice than simply exit.
*/
ret = -EINVAL;
goto out;
}
/*
* Packets associated with this timestamp are already in
* the etmq's traceID queue, so process them.
*/
ret = cs_etm__process_traceid_queue(etmq, tidq);
if (ret < 0)
goto out;
/*
* Packets for this timestamp have been processed, time to
* move on to the next timestamp, fetching a new auxtrace_buffer
* if need be.
*/
refetch:
ret = cs_etm__get_data_block(etmq);
if (ret < 0)
goto out;
/*
* No more auxtrace_buffers to process in this etmq, simply
* move on to another entry in the auxtrace_heap.
*/
if (!ret)
continue;
ret = cs_etm__decode_data_block(etmq);
if (ret)
goto out;
cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
if (!cs_timestamp) {
/*
* Function cs_etm__decode_data_block() returns when
* there is no more traces to decode in the current
* auxtrace_buffer OR when a timestamp has been
* encountered on any of the traceID queues. Since we
* did not get a timestamp, there is no more traces to
* process in this auxtrace_buffer. As such empty and
* flush all traceID queues.
*/
cs_etm__clear_all_traceid_queues(etmq);
/* Fetch another auxtrace_buffer for this etmq */
goto refetch;
}
/*
* Add to the min heap the timestamp for packets that have
* just been decoded. They will be processed and synthesized
* during the next call to cs_etm__process_traceid_queue() for
* this queue/traceID.
*/
cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
}
out:
return ret;
}
static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
union perf_event *event)
{
struct thread *th;
if (etm->timeless_decoding)
return 0;
/*
* Add the tid/pid to the log so that we can get a match when we get a
* contextID from the decoder. Only track for the host: only kernel
* trace is supported for guests, which wouldn't need pids, so this
* should be fine.
*/
th = machine__findnew_thread(&etm->session->machines.host,
event->itrace_start.pid,
event->itrace_start.tid);
if (!th)
return -ENOMEM;
thread__put(th);
return 0;
}
static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
union perf_event *event)
{
struct thread *th;
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
/*
* Context switches in per-thread mode are irrelevant since perf
* will start/stop tracing as the process is scheduled.
*/
if (etm->timeless_decoding)
return 0;
/*
* SWITCH_IN events carry the next process to be switched out while
* SWITCH_OUT events carry the process to be switched in. As such
* we don't care about IN events.
*/
if (!out)
return 0;
/*
* Add the tid/pid to the log so that we can get a match when we get a
* contextID from the decoder. Only track for the host: only kernel
* trace is supported for guests, which wouldn't need pids, so this
* should be fine.
*/
th = machine__findnew_thread(&etm->session->machines.host,
event->context_switch.next_prev_pid,
event->context_switch.next_prev_tid);
if (!th)
return -ENOMEM;
thread__put(th);
return 0;
}
static int cs_etm__process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
{
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
if (dump_trace)
return 0;
if (!tool->ordered_events) {
pr_err("CoreSight ETM Trace requires ordered events\n");
return -EINVAL;
}
switch (event->header.type) {
case PERF_RECORD_EXIT:
/*
* Don't need to wait for cs_etm__flush_events() in per-thread mode to
* start the decode because we know there will be no more trace from
* this thread. All this does is emit samples earlier than waiting for
* the flush in other modes, but with timestamps it makes sense to wait
* for flush so that events from different threads are interleaved
* properly.
*/
if (etm->per_thread_decoding && etm->timeless_decoding)
return cs_etm__process_timeless_queues(etm,
event->fork.tid);
break;
case PERF_RECORD_ITRACE_START:
return cs_etm__process_itrace_start(etm, event);
case PERF_RECORD_SWITCH_CPU_WIDE:
return cs_etm__process_switch_cpu_wide(etm, event);
case PERF_RECORD_AUX:
/*
* Record the latest kernel timestamp available in the header
* for samples so that synthesised samples occur from this point
* onwards.
*/
if (sample->time && (sample->time != (u64)-1))
etm->latest_kernel_timestamp = sample->time;
break;
default:
break;
}
return 0;
}
static void dump_queued_data(struct cs_etm_auxtrace *etm,
struct perf_record_auxtrace *event)
{
struct auxtrace_buffer *buf;
unsigned int i;
/*
* Find all buffers with same reference in the queues and dump them.
* This is because the queues can contain multiple entries of the same
* buffer that were split on aux records.
*/
for (i = 0; i < etm->queues.nr_queues; ++i)
list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
if (buf->reference == event->reference)
cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
}
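/*
 * Handle a PERF_RECORD_AUXTRACE event: queue the buffer and set up its
 * decode queue when data was not already queued up front (e.g. piped
 * input), and dump the trace data when trace dumping is requested.
 */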
static int cs_etm__process_auxtrace_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool __maybe_unused)
{
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
if (!etm->data_queued) {
struct auxtrace_buffer *buffer;
off_t data_offset;
int fd = perf_data__fd(session->data);
bool is_pipe = perf_data__is_pipe(session->data);
int err;
int idx = event->auxtrace.idx;
if (is_pipe)
data_offset = 0;
else {
data_offset = lseek(fd, 0, SEEK_CUR);
if (data_offset == -1)
return -errno;
}
err = auxtrace_queues__add_event(&etm->queues, session,
event, data_offset, &buffer);
if (err)
return err;
/*
* Knowing if the trace is formatted or not requires a lookup of
* the aux record so only works in non-piped mode where data is
* queued in cs_etm__queue_aux_records(). Always assume
* formatted in piped mode (true).
*/
err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
idx, true);
if (err)
return err;
if (dump_trace)
if (auxtrace_buffer__get_data(buffer, fd)) {
cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
auxtrace_buffer__put_data(buffer);
}
} else if (dump_trace)
dump_queued_data(etm, &event->auxtrace);
return 0;
}
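/*
 * Decide whether to decode without timestamps: either forced by the user
 * with --itrace=Z or because the ETM event was configured without the
 * timestamp option.
 */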
static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm)
{
struct evsel *evsel;
struct evlist *evlist = etm->session->evlist;
/* Override timeless mode with user input from --itrace=Z */
if (etm->synth_opts.timeless_decoding) {
etm->timeless_decoding = true;
return 0;
}
/*
* Find the cs_etm evsel and look at what its timestamp setting was
*/
evlist__for_each_entry(evlist, evsel)
if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) {
etm->timeless_decoding =
!(evsel->core.attr.config & BIT(ETM_OPT_TS));
return 0;
}
pr_err("CS ETM: Couldn't find ETM evsel\n");
return -EINVAL;
}
/*
* Read a single cpu parameter block from the auxtrace_info priv block.
*
* For version 1 there is a per cpu nr_params entry. If we are handling a
* version 1 file, then there may be fewer, the same, or more params
* indicated by this value than the compile time number we understand.
*
* For a version 0 info block, there are a fixed number, and we need to
* fill out the nr_param value in the metadata we create.
*/
static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
int out_blk_size, int nr_params_v0)
{
u64 *metadata = NULL;
int hdr_version;
int nr_in_params, nr_out_params, nr_cmn_params;
int i, k;
metadata = zalloc(sizeof(*metadata) * out_blk_size);
if (!metadata)
return NULL;
/* read block current index & version */
i = *buff_in_offset;
hdr_version = buff_in[CS_HEADER_VERSION];
if (!hdr_version) {
/* read version 0 info block into a version 1 metadata block */
nr_in_params = nr_params_v0;
metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
/* remaining block params at offset +1 from source */
for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
metadata[k + 1] = buff_in[i + k];
/* version 0 has 2 common params */
nr_cmn_params = 2;
} else {
/* read version 1 info block - input and output nr_params may differ */
/* version 1 has 3 common params */
nr_cmn_params = 3;
nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
/* if input has more params than output - skip excess */
nr_out_params = nr_in_params + nr_cmn_params;
if (nr_out_params > out_blk_size)
nr_out_params = out_blk_size;
for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
metadata[k] = buff_in[i + k];
/* record the actual nr params we copied */
metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
}
/* adjust in offset by number of in params used */
i += nr_in_params + nr_cmn_params;
*buff_in_offset = i;
return metadata;
}
/**
* Puts a fragment of an auxtrace buffer into the auxtrace queues based
* on the bounds of aux_event, if it matches with the buffer that's at
* file_offset.
*
* Normally, whole auxtrace buffers would be added to the queue. But we
* want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
* is reset across each buffer, so splitting the buffers up in advance has
* the same effect.
*/
static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
struct perf_record_aux *aux_event, struct perf_sample *sample)
{
int err;
char buf[PERF_SAMPLE_MAX_SIZE];
union perf_event *auxtrace_event_union;
struct perf_record_auxtrace *auxtrace_event;
union perf_event auxtrace_fragment;
__u64 aux_offset, aux_size;
__u32 idx;
bool formatted;
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
/*
* There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
* from looping through the auxtrace index.
*/
err = perf_session__peek_event(session, file_offset, buf,
PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
if (err)
return err;
auxtrace_event = &auxtrace_event_union->auxtrace;
if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
return -EINVAL;
if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
auxtrace_event->header.size != sz) {
return -EINVAL;
}
/*
* In per-thread mode, auxtrace CPU is set to -1, but TID will be set instead. See
* auxtrace_mmap_params__set_idx(). However, the sample AUX event will contain a
* CPU as we set this always for the AUX_OUTPUT_HW_ID event.
* So now compare only TIDs if auxtrace CPU is -1, and CPUs if auxtrace CPU is not -1.
* Return 'not found' if mismatch.
*/
if (auxtrace_event->cpu == (__u32) -1) {
etm->per_thread_decoding = true;
if (auxtrace_event->tid != sample->tid)
return 1;
} else if (auxtrace_event->cpu != sample->cpu) {
if (etm->per_thread_decoding) {
/*
* Found a per-cpu buffer after a per-thread one was
* already found
*/
pr_err("CS ETM: Inconsistent per-thread/per-cpu mode.\n");
return -EINVAL;
}
return 1;
}
if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
/*
* Clamp size in snapshot mode. The buffer size is clamped in
* __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect
* the buffer size.
*/
aux_size = min(aux_event->aux_size, auxtrace_event->size);
/*
* In this mode, the head also points to the end of the buffer so aux_offset
* needs to have the size subtracted so it points to the beginning as in normal mode
*/
aux_offset = aux_event->aux_offset - aux_size;
} else {
aux_size = aux_event->aux_size;
aux_offset = aux_event->aux_offset;
}
if (aux_offset >= auxtrace_event->offset &&
aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
/*
* If this AUX event was inside this buffer somewhere, create a new auxtrace event
* based on the sizes of the aux event, and queue that fragment.
*/
auxtrace_fragment.auxtrace = *auxtrace_event;
auxtrace_fragment.auxtrace.size = aux_size;
auxtrace_fragment.auxtrace.offset = aux_offset;
file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
" tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
file_offset, NULL);
if (err)
return err;
idx = auxtrace_event->idx;
formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
idx, formatted);
}
/* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
return 1;
}
static int cs_etm__process_aux_hw_id_cb(struct perf_session *session, union perf_event *event,
u64 offset __maybe_unused, void *data __maybe_unused)
{
/* look to handle PERF_RECORD_AUX_OUTPUT_HW_ID early to ensure decoders can be set up */
if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID) {
(*(int *)data)++; /* increment found count */
return cs_etm__process_aux_output_hw_id(session, event);
}
return 0;
}
static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
u64 offset __maybe_unused, void *data __maybe_unused)
{
struct perf_sample sample;
int ret;
struct auxtrace_index_entry *ent;
struct auxtrace_index *auxtrace_index;
struct evsel *evsel;
size_t i;
/* Don't care about any other events, we're only queuing buffers for AUX events */
if (event->header.type != PERF_RECORD_AUX)
return 0;
if (event->header.size < sizeof(struct perf_record_aux))
return -EINVAL;
/* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
if (!event->aux.aux_size)
return 0;
/*
* Parse the sample, we need the sample_id_all data that comes after the event so that the
* CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
*/
evsel = evlist__event2evsel(session->evlist, event);
if (!evsel)
return -EINVAL;
ret = evsel__parse_sample(evsel, event, &sample);
if (ret)
return ret;
/*
* Loop through the auxtrace index to find the buffer that matches up with this aux event.
*/
list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
for (i = 0; i < auxtrace_index->nr; i++) {
ent = &auxtrace_index->entries[i];
ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
ent->sz, &event->aux, &sample);
/*
* Stop search on error or successful values. Continue search on
* 1 ('not found')
*/
if (ret != 1)
return ret;
}
}
/*
* Couldn't find the buffer corresponding to this aux record, something went wrong. Warn but
* don't exit with an error because it will still be possible to decode other aux records.
*/
pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
" tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
return 0;
}
static int cs_etm__queue_aux_records(struct perf_session *session)
{
struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
struct auxtrace_index, list);
if (index && index->nr > 0)
return perf_session__peek_events(session, session->header.data_offset,
session->header.data_size,
cs_etm__queue_aux_records_cb, NULL);
/*
* We would get here if there are no entries in the index (either no auxtrace
* buffers or no index at all). Fail silently as there is the possibility of
* queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still
* false.
*
* In that scenario, buffers will not be split by AUX records.
*/
return 0;
}
#define HAS_PARAM(j, type, param) (metadata[(j)][CS_ETM_NR_TRC_PARAMS] <= \
(CS_##type##_##param - CS_ETM_COMMON_BLK_MAX_V1))
/*
* Loop through the ETMs and return false if we find at least one where ts_source != 1
* (virtual timestamps).
*/
static bool cs_etm__has_virtual_ts(u64 **metadata, int num_cpu)
{
int j;
for (j = 0; j < num_cpu; j++) {
switch (metadata[j][CS_ETM_MAGIC]) {
case __perf_cs_etmv4_magic:
if (HAS_PARAM(j, ETMV4, TS_SOURCE) || metadata[j][CS_ETMV4_TS_SOURCE] != 1)
return false;
break;
case __perf_cs_ete_magic:
if (HAS_PARAM(j, ETE, TS_SOURCE) || metadata[j][CS_ETE_TS_SOURCE] != 1)
return false;
break;
default:
/* Unknown / unsupported magic number. */
return false;
}
}
return true;
}
/* map trace ids to correct metadata block, from information in metadata */
static int cs_etm__map_trace_ids_metadata(int num_cpu, u64 **metadata)
{
u64 cs_etm_magic;
u8 trace_chan_id;
int i, err;
for (i = 0; i < num_cpu; i++) {
cs_etm_magic = metadata[i][CS_ETM_MAGIC];
switch (cs_etm_magic) {
case __perf_cs_etmv3_magic:
metadata[i][CS_ETM_ETMTRACEIDR] &= CORESIGHT_TRACE_ID_VAL_MASK;
trace_chan_id = (u8)(metadata[i][CS_ETM_ETMTRACEIDR]);
break;
case __perf_cs_etmv4_magic:
case __perf_cs_ete_magic:
metadata[i][CS_ETMV4_TRCTRACEIDR] &= CORESIGHT_TRACE_ID_VAL_MASK;
trace_chan_id = (u8)(metadata[i][CS_ETMV4_TRCTRACEIDR]);
break;
default:
/* unknown magic number */
return -EINVAL;
}
err = cs_etm__map_trace_id(trace_chan_id, metadata[i]);
if (err)
return err;
}
return 0;
}
/*
* If we found AUX_HW_ID packets, then set any metadata marked as unused to the
* unused value to reduce the number of unneeded decoders created.
*/
static int cs_etm__clear_unused_trace_ids_metadata(int num_cpu, u64 **metadata)
{
u64 cs_etm_magic;
int i;
for (i = 0; i < num_cpu; i++) {
cs_etm_magic = metadata[i][CS_ETM_MAGIC];
switch (cs_etm_magic) {
case __perf_cs_etmv3_magic:
if (metadata[i][CS_ETM_ETMTRACEIDR] & CORESIGHT_TRACE_ID_UNUSED_FLAG)
metadata[i][CS_ETM_ETMTRACEIDR] = CORESIGHT_TRACE_ID_UNUSED_VAL;
break;
case __perf_cs_etmv4_magic:
case __perf_cs_ete_magic:
if (metadata[i][CS_ETMV4_TRCTRACEIDR] & CORESIGHT_TRACE_ID_UNUSED_FLAG)
metadata[i][CS_ETMV4_TRCTRACEIDR] = CORESIGHT_TRACE_ID_UNUSED_VAL;
break;
default:
/* unknown magic number */
return -EINVAL;
}
}
return 0;
}
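/*
 * Parse the AUXTRACE_INFO record: build the per-CPU metadata blocks, set up
 * the cs_etm_auxtrace instance and its auxtrace callbacks, map trace IDs to
 * the metadata and queue the recorded AUX data for decoding.
 */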
int cs_etm__process_auxtrace_info_full(union perf_event *event,
struct perf_session *session)
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
struct cs_etm_auxtrace *etm = NULL;
struct perf_record_time_conv *tc = &session->time_conv;
int event_header_size = sizeof(struct perf_event_header);
int total_size = auxtrace_info->header.size;
int priv_size = 0;
int num_cpu;
int err = 0;
int aux_hw_id_found;
int i, j;
u64 *ptr = NULL;
u64 **metadata = NULL;
/*
* Create an RB tree for traceID-metadata tuple. Since the conversion
* has to be made for each packet that gets decoded, optimizing access
* in anything other than a sequential array is worth doing.
*/
traceid_list = intlist__new(NULL);
if (!traceid_list)
return -ENOMEM;
/* First the global part */
ptr = (u64 *) auxtrace_info->priv;
num_cpu = ptr[CS_PMU_TYPE_CPUS] & 0xffffffff;
metadata = zalloc(sizeof(*metadata) * num_cpu);
if (!metadata) {
err = -ENOMEM;
goto err_free_traceid_list;
}
/* Start parsing after the common part of the header */
i = CS_HEADER_VERSION_MAX;
/*
* The metadata is stored in the auxtrace_info section and encodes
* the configuration of the ARM embedded trace macrocell which is
* required by the trace decoder to properly decode the trace due
* to its highly compressed nature.
*/
for (j = 0; j < num_cpu; j++) {
if (ptr[i] == __perf_cs_etmv3_magic) {
metadata[j] =
cs_etm__create_meta_blk(ptr, &i,
CS_ETM_PRIV_MAX,
CS_ETM_NR_TRC_PARAMS_V0);
} else if (ptr[i] == __perf_cs_etmv4_magic) {
metadata[j] =
cs_etm__create_meta_blk(ptr, &i,
CS_ETMV4_PRIV_MAX,
CS_ETMV4_NR_TRC_PARAMS_V0);
} else if (ptr[i] == __perf_cs_ete_magic) {
metadata[j] = cs_etm__create_meta_blk(ptr, &i, CS_ETE_PRIV_MAX, -1);
} else {
ui__error("CS ETM Trace: Unrecognised magic number %#"PRIx64". File could be from a newer version of perf.\n",
ptr[i]);
err = -EINVAL;
goto err_free_metadata;
}
if (!metadata[j]) {
err = -ENOMEM;
goto err_free_metadata;
}
}
/*
* Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
* CS_ETMV4_PRIV_MAX mark how many double words are in the
* global metadata, and each cpu's metadata respectively.
* The following tests if the correct number of double words was
* present in the auxtrace info section.
*/
priv_size = total_size - event_header_size - INFO_HEADER_SIZE;
if (i * 8 != priv_size) {
err = -EINVAL;
goto err_free_metadata;
}
etm = zalloc(sizeof(*etm));
if (!etm) {
err = -ENOMEM;
goto err_free_metadata;
}
/*
* As all the ETMs run at the same exception level, the system should
* have the same PID format crossing CPUs. So cache the PID format
* and reuse it for sequential decoding.
*/
etm->pid_fmt = cs_etm__init_pid_fmt(metadata[0]);
err = auxtrace_queues__init(&etm->queues);
if (err)
goto err_free_etm;
if (session->itrace_synth_opts->set) {
etm->synth_opts = *session->itrace_synth_opts;
} else {
itrace_synth_opts__set_default(&etm->synth_opts,
session->itrace_synth_opts->default_no_sample);
etm->synth_opts.callchain = false;
}
etm->session = session;
etm->num_cpu = num_cpu;
etm->pmu_type = (unsigned int) ((ptr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff);
etm->snapshot_mode = (ptr[CS_ETM_SNAPSHOT] != 0);
etm->metadata = metadata;
etm->auxtrace_type = auxtrace_info->type;
/* Use virtual timestamps if all ETMs report ts_source = 1 */
etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu);
if (!etm->has_virtual_ts)
ui__warning("Virtual timestamps are not enabled, or not supported by the traced system.\n"
"The time field of the samples will not be set accurately.\n\n");
etm->auxtrace.process_event = cs_etm__process_event;
etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
etm->auxtrace.flush_events = cs_etm__flush_events;
etm->auxtrace.free_events = cs_etm__free_events;
etm->auxtrace.free = cs_etm__free;
etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
session->auxtrace = &etm->auxtrace;
err = cs_etm__setup_timeless_decoding(etm);
if (err)
return err;
etm->tc.time_shift = tc->time_shift;
etm->tc.time_mult = tc->time_mult;
etm->tc.time_zero = tc->time_zero;
if (event_contains(*tc, time_cycles)) {
etm->tc.time_cycles = tc->time_cycles;
etm->tc.time_mask = tc->time_mask;
etm->tc.cap_user_time_zero = tc->cap_user_time_zero;
etm->tc.cap_user_time_short = tc->cap_user_time_short;
}
err = cs_etm__synth_events(etm, session);
if (err)
goto err_free_queues;
/*
* Map Trace ID values to CPU metadata.
*
* Trace metadata will always contain Trace ID values from the legacy algorithm. If the
* file has been recorded by a "new" perf updated to handle AUX_HW_ID then the metadata
* ID value will also have the CORESIGHT_TRACE_ID_UNUSED_FLAG set.
*
* The updated kernel drivers that use AUX_HW_ID to send Trace IDs will attempt to use
* the same IDs as the old algorithm as far as is possible, unless there are clashes,
* in which case a different value will be used. This means an older perf may still
* be able to record and read files generated on a newer system.
*
* For a perf able to interpret AUX_HW_ID packets we first check for the presence of
* those packets. If they are there then the values will be mapped and plugged into
* the metadata. We then set any remaining metadata values with the used flag to a
* value CORESIGHT_TRACE_ID_UNUSED_VAL - which indicates no decoder is required.
*
* If no AUX_HW_ID packets are present - which means a file recorded on an old kernel
* then we map Trace ID values to CPU directly from the metadata - clearing any unused
* flags if present.
*/
/* first scan for AUX_OUTPUT_HW_ID records to map trace ID values to CPU metadata */
aux_hw_id_found = 0;
err = perf_session__peek_events(session, session->header.data_offset,
session->header.data_size,
cs_etm__process_aux_hw_id_cb, &aux_hw_id_found);
if (err)
goto err_free_queues;
/* if HW ID found then clear any unused metadata ID values */
if (aux_hw_id_found)
err = cs_etm__clear_unused_trace_ids_metadata(num_cpu, metadata);
/* otherwise, this is a file with metadata values only, map from metadata */
else
err = cs_etm__map_trace_ids_metadata(num_cpu, metadata);
if (err)
goto err_free_queues;
err = cs_etm__queue_aux_records(session);
if (err)
goto err_free_queues;
etm->data_queued = etm->queues.populated;
return 0;
err_free_queues:
auxtrace_queues__free(&etm->queues);
session->auxtrace = NULL;
err_free_etm:
zfree(&etm);
err_free_metadata:
/* No need to check @metadata[j], free(NULL) is supported */
for (j = 0; j < num_cpu; j++)
zfree(&metadata[j]);
zfree(&metadata);
err_free_traceid_list:
intlist__delete(traceid_list);
return err;
}
| linux-master | tools/perf/util/cs-etm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* probe-event.c : perf-probe definition to probe_events format converter
*
* Written by Masami Hiramatsu <[email protected]>
*/
#include <inttypes.h>
#include <sys/utsname.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <limits.h>
#include <elf.h>
#include "build-id.h"
#include "event.h"
#include "namespaces.h"
#include "strlist.h"
#include "strfilter.h"
#include "debug.h"
#include "dso.h"
#include "color.h"
#include "map.h"
#include "maps.h"
#include "mutex.h"
#include "symbol.h"
#include <api/fs/fs.h>
#include "trace-event.h" /* For __maybe_unused */
#include "probe-event.h"
#include "probe-finder.h"
#include "probe-file.h"
#include "session.h"
#include "string2.h"
#include "strbuf.h"
#include <subcmd/pager.h>
#include <linux/ctype.h>
#include <linux/zalloc.h>
#ifdef HAVE_DEBUGINFOD_SUPPORT
#include <elfutils/debuginfod.h>
#endif
#define PERFPROBE_GROUP "probe"
bool probe_event_dry_run; /* Dry run flag */
struct probe_conf probe_conf = { .magic_num = DEFAULT_PROBE_MAGIC_NUM };
static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
#define semantic_error(msg ...) pr_err("Semantic error :" msg)
int e_snprintf(char *str, size_t size, const char *format, ...)
{
int ret;
va_list ap;
va_start(ap, format);
ret = vsnprintf(str, size, format, ap);
va_end(ap);
if (ret >= (int)size)
ret = -E2BIG;
return ret;
}
static struct machine *host_machine;
/* Initialize symbol maps and path of vmlinux/modules */
int init_probe_symbol_maps(bool user_only)
{
int ret;
symbol_conf.allow_aliases = true;
ret = symbol__init(NULL);
if (ret < 0) {
pr_debug("Failed to init symbol map.\n");
goto out;
}
if (host_machine || user_only) /* already initialized */
return 0;
if (symbol_conf.vmlinux_name)
pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
host_machine = machine__new_host();
if (!host_machine) {
pr_debug("machine__new_host() failed.\n");
symbol__exit();
ret = -1;
}
out:
if (ret < 0)
pr_warning("Failed to init vmlinux path.\n");
return ret;
}
void exit_probe_symbol_maps(void)
{
machine__delete(host_machine);
host_machine = NULL;
symbol__exit();
}
static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap)
{
struct kmap *kmap;
struct map *map = machine__kernel_map(host_machine);
if (map__load(map) < 0)
return NULL;
kmap = map__kmap(map);
if (!kmap)
return NULL;
if (pmap)
*pmap = map;
return kmap->ref_reloc_sym;
}
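/*
 * Look up a kernel symbol by name and return its address, optionally
 * relocated and/or made relative to the start of the map containing it.
 */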
static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
bool reloc, bool reladdr)
{
struct ref_reloc_sym *reloc_sym;
struct symbol *sym;
struct map *map;
/* ref_reloc_sym is just a label. Need a special fix. */
reloc_sym = kernel_get_ref_reloc_sym(&map);
if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
*addr = (!map__reloc(map) || reloc) ? reloc_sym->addr :
reloc_sym->unrelocated_addr;
else {
sym = machine__find_kernel_symbol_by_name(host_machine, name, &map);
if (!sym)
return -ENOENT;
*addr = map__unmap_ip(map, sym->start) -
((reloc) ? 0 : map__reloc(map)) -
((reladdr) ? map__start(map) : 0);
}
return 0;
}
static struct map *kernel_get_module_map(const char *module)
{
struct maps *maps = machine__kernel_maps(host_machine);
struct map_rb_node *pos;
/* A file path -- this is an offline module */
if (module && strchr(module, '/'))
return dso__new_map(module);
if (!module) {
struct map *map = machine__kernel_map(host_machine);
return map__get(map);
}
maps__for_each_entry(maps, pos) {
/* short_name is "[module]" */
struct dso *dso = map__dso(pos->map);
const char *short_name = dso->short_name;
u16 short_name_len = dso->short_name_len;
if (strncmp(short_name + 1, module,
short_name_len - 2) == 0 &&
module[short_name_len - 2] == '\0') {
return map__get(pos->map);
}
}
return NULL;
}
struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user)
{
/* Init maps of given executable or kernel */
if (user) {
struct map *map;
struct dso *dso;
map = dso__new_map(target);
dso = map ? map__dso(map) : NULL;
if (dso) {
mutex_lock(&dso->lock);
nsinfo__put(dso->nsinfo);
dso->nsinfo = nsinfo__get(nsi);
mutex_unlock(&dso->lock);
}
return map;
} else {
return kernel_get_module_map(target);
}
}
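/*
 * Build a probe group name of the form "probe_<basename of exec>", keeping
 * only the leading alphanumeric/underscore characters of the basename.
 */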
static int convert_exec_to_group(const char *exec, char **result)
{
char *ptr1, *ptr2, *exec_copy;
char buf[64];
int ret;
exec_copy = strdup(exec);
if (!exec_copy)
return -ENOMEM;
ptr1 = basename(exec_copy);
if (!ptr1) {
ret = -EINVAL;
goto out;
}
for (ptr2 = ptr1; *ptr2 != '\0'; ptr2++) {
if (!isalnum(*ptr2) && *ptr2 != '_') {
*ptr2 = '\0';
break;
}
}
ret = e_snprintf(buf, 64, "%s_%s", PERFPROBE_GROUP, ptr1);
if (ret < 0)
goto out;
*result = strdup(buf);
ret = *result ? 0 : -ENOMEM;
out:
free(exec_copy);
return ret;
}
static void clear_perf_probe_point(struct perf_probe_point *pp)
{
zfree(&pp->file);
zfree(&pp->function);
zfree(&pp->lazy_line);
}
static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
{
int i;
for (i = 0; i < ntevs; i++)
clear_probe_trace_event(tevs + i);
}
static bool kprobe_blacklist__listed(u64 address);
static bool kprobe_warn_out_range(const char *symbol, u64 address)
{
struct map *map;
bool ret = false;
map = kernel_get_module_map(NULL);
if (map) {
ret = address <= map__start(map) || map__end(map) < address;
if (ret)
pr_warning("%s is out of .text, skip it.\n", symbol);
map__put(map);
}
if (!ret && kprobe_blacklist__listed(address)) {
pr_warning("%s is blacklisted function, skip it.\n", symbol);
ret = true;
}
return ret;
}
/*
* @module can be a module name or a module file path. In case of a path,
* inspect the ELF and find out what the actual module name is.
* Caller has to free mod_name after using it.
*/
static char *find_module_name(const char *module)
{
int fd;
Elf *elf;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
Elf_Data *data;
Elf_Scn *sec;
char *mod_name = NULL;
int name_offset;
fd = open(module, O_RDONLY);
if (fd < 0)
return NULL;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL)
goto elf_err;
if (gelf_getehdr(elf, &ehdr) == NULL)
goto ret_err;
sec = elf_section_by_name(elf, &ehdr, &shdr,
".gnu.linkonce.this_module", NULL);
if (!sec)
goto ret_err;
data = elf_getdata(sec, NULL);
if (!data || !data->d_buf)
goto ret_err;
/*
* NOTE:
* '.gnu.linkonce.this_module' section of kernel module elf directly
* maps to 'struct module' from linux/module.h. This section contains
* actual module name which will be used by kernel after loading it.
* But, we cannot use 'struct module' here since linux/module.h is not
* exposed to user-space. The offset of 'name' has remained the same
* for a long time, so it is hardcoded here.
*/
if (ehdr.e_ident[EI_CLASS] == ELFCLASS32)
name_offset = 12;
else /* expect ELFCLASS64 by default */
name_offset = 24;
mod_name = strdup((char *)data->d_buf + name_offset);
ret_err:
elf_end(elf);
elf_err:
close(fd);
return mod_name;
}
#ifdef HAVE_DWARF_SUPPORT
static int kernel_get_module_dso(const char *module, struct dso **pdso)
{
struct dso *dso;
struct map *map;
const char *vmlinux_name;
int ret = 0;
if (module) {
char module_name[128];
snprintf(module_name, sizeof(module_name), "[%s]", module);
map = maps__find_by_name(machine__kernel_maps(host_machine), module_name);
if (map) {
dso = map__dso(map);
goto found;
}
pr_debug("Failed to find module %s.\n", module);
return -ENOENT;
}
map = machine__kernel_map(host_machine);
dso = map__dso(map);
if (!dso->has_build_id)
dso__read_running_kernel_build_id(dso, host_machine);
vmlinux_name = symbol_conf.vmlinux_name;
dso->load_errno = 0;
if (vmlinux_name)
ret = dso__load_vmlinux(dso, map, vmlinux_name, false);
else
ret = dso__load_vmlinux_path(dso, map);
found:
*pdso = dso;
return ret;
}
/*
* Some binaries like glibc have special symbols which are on the symbol
* table, but not in the debuginfo. If we can find the address of the
* symbol from map, we can translate the address back to the probe point.
*/
static int find_alternative_probe_point(struct debuginfo *dinfo,
struct perf_probe_point *pp,
struct perf_probe_point *result,
const char *target, struct nsinfo *nsi,
bool uprobes)
{
struct map *map = NULL;
struct symbol *sym;
u64 address = 0;
int ret = -ENOENT;
size_t idx;
/* This can work only for function-name based one */
if (!pp->function || pp->file)
return -ENOTSUP;
map = get_target_map(target, nsi, uprobes);
if (!map)
return -EINVAL;
/* Find the address of given function */
map__for_each_symbol_by_name(map, pp->function, sym, idx) {
if (uprobes) {
address = sym->start;
if (sym->type == STT_GNU_IFUNC)
pr_warning("Warning: The probe function (%s) is a GNU indirect function.\n"
"Consider identifying the final function used at run time and set the probe directly on that.\n",
pp->function);
} else
address = map__unmap_ip(map, sym->start) - map__reloc(map);
break;
}
if (!address) {
ret = -ENOENT;
goto out;
}
pr_debug("Symbol %s address found : %" PRIx64 "\n",
pp->function, address);
ret = debuginfo__find_probe_point(dinfo, address, result);
if (ret <= 0)
ret = (!ret) ? -ENOENT : ret;
else {
result->offset += pp->offset;
result->line += pp->line;
result->retprobe = pp->retprobe;
ret = 0;
}
out:
map__put(map);
return ret;
}
static int get_alternative_probe_event(struct debuginfo *dinfo,
struct perf_probe_event *pev,
struct perf_probe_point *tmp)
{
int ret;
memcpy(tmp, &pev->point, sizeof(*tmp));
memset(&pev->point, 0, sizeof(pev->point));
ret = find_alternative_probe_point(dinfo, tmp, &pev->point, pev->target,
pev->nsi, pev->uprobes);
if (ret < 0)
memcpy(&pev->point, tmp, sizeof(*tmp));
return ret;
}
static int get_alternative_line_range(struct debuginfo *dinfo,
struct line_range *lr,
const char *target, bool user)
{
struct perf_probe_point pp = { .function = lr->function,
.file = lr->file,
.line = lr->start };
struct perf_probe_point result;
int ret, len = 0;
memset(&result, 0, sizeof(result));
if (lr->end != INT_MAX)
len = lr->end - lr->start;
ret = find_alternative_probe_point(dinfo, &pp, &result,
target, NULL, user);
if (!ret) {
lr->function = result.function;
lr->file = result.file;
lr->start = result.line;
if (lr->end != INT_MAX)
lr->end = lr->start + len;
clear_perf_probe_point(&pp);
}
return ret;
}
#ifdef HAVE_DEBUGINFOD_SUPPORT
static struct debuginfo *open_from_debuginfod(struct dso *dso, struct nsinfo *nsi,
bool silent)
{
debuginfod_client *c = debuginfod_begin();
char sbuild_id[SBUILD_ID_SIZE + 1];
struct debuginfo *ret = NULL;
struct nscookie nsc;
char *path;
int fd;
if (!c)
return NULL;
build_id__sprintf(&dso->bid, sbuild_id);
fd = debuginfod_find_debuginfo(c, (const unsigned char *)sbuild_id,
0, &path);
if (fd >= 0)
close(fd);
debuginfod_end(c);
if (fd < 0) {
if (!silent)
pr_debug("Failed to find debuginfo in debuginfod.\n");
return NULL;
}
if (!silent)
pr_debug("Load debuginfo from debuginfod (%s)\n", path);
nsinfo__mountns_enter(nsi, &nsc);
ret = debuginfo__new((const char *)path);
nsinfo__mountns_exit(&nsc);
return ret;
}
#else
static inline
struct debuginfo *open_from_debuginfod(struct dso *dso __maybe_unused,
struct nsinfo *nsi __maybe_unused,
bool silent __maybe_unused)
{
return NULL;
}
#endif
/* Open new debuginfo of given module */
static struct debuginfo *open_debuginfo(const char *module, struct nsinfo *nsi,
bool silent)
{
const char *path = module;
char reason[STRERR_BUFSIZE];
struct debuginfo *ret = NULL;
struct dso *dso = NULL;
struct nscookie nsc;
int err;
if (!module || !strchr(module, '/')) {
err = kernel_get_module_dso(module, &dso);
if (err < 0) {
if (!dso || dso->load_errno == 0) {
if (!str_error_r(-err, reason, STRERR_BUFSIZE))
strcpy(reason, "(unknown)");
} else
dso__strerror_load(dso, reason, STRERR_BUFSIZE);
if (dso)
ret = open_from_debuginfod(dso, nsi, silent);
if (ret)
return ret;
if (!silent) {
if (module)
pr_err("Module %s is not loaded, please specify its full path name.\n", module);
else
pr_err("Failed to find the path for the kernel: %s\n", reason);
}
return NULL;
}
path = dso->long_name;
}
nsinfo__mountns_enter(nsi, &nsc);
ret = debuginfo__new(path);
if (!ret && !silent) {
pr_warning("The %s file has no debug information.\n", path);
if (!module || !strtailcmp(path, ".ko"))
pr_warning("Rebuild with CONFIG_DEBUG_INFO=y, ");
else
pr_warning("Rebuild with -g, ");
pr_warning("or install an appropriate debuginfo package.\n");
}
nsinfo__mountns_exit(&nsc);
return ret;
}
/* For caching the last debuginfo */
static struct debuginfo *debuginfo_cache;
static char *debuginfo_cache_path;
static struct debuginfo *debuginfo_cache__open(const char *module, bool silent)
{
const char *path = module;
/* If the module is NULL, it should be the kernel. */
if (!module)
path = "kernel";
if (debuginfo_cache_path && !strcmp(debuginfo_cache_path, path))
goto out;
/* Copy module path */
free(debuginfo_cache_path);
debuginfo_cache_path = strdup(path);
if (!debuginfo_cache_path) {
debuginfo__delete(debuginfo_cache);
debuginfo_cache = NULL;
goto out;
}
debuginfo_cache = open_debuginfo(module, NULL, silent);
if (!debuginfo_cache)
zfree(&debuginfo_cache_path);
out:
return debuginfo_cache;
}
static void debuginfo_cache__exit(void)
{
debuginfo__delete(debuginfo_cache);
debuginfo_cache = NULL;
zfree(&debuginfo_cache_path);
}
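/*
 * get_text_start_address() below computes the bias between the mapped
 * address and the file offset of .text. Illustrative example (values
 * invented): with sh_addr 0x400400 and sh_offset 0x400, *address becomes
 * 0x400000, which callers add to file-based addresses to obtain DWARF
 * (virtual) addresses.
 */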
static int get_text_start_address(const char *exec, u64 *address,
struct nsinfo *nsi)
{
Elf *elf;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
int fd, ret = -ENOENT;
struct nscookie nsc;
nsinfo__mountns_enter(nsi, &nsc);
fd = open(exec, O_RDONLY);
nsinfo__mountns_exit(&nsc);
if (fd < 0)
return -errno;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL) {
ret = -EINVAL;
goto out_close;
}
if (gelf_getehdr(elf, &ehdr) == NULL)
goto out;
if (!elf_section_by_name(elf, &ehdr, &shdr, ".text", NULL))
goto out;
*address = shdr.sh_addr - shdr.sh_offset;
ret = 0;
out:
elf_end(elf);
out_close:
close(fd);
return ret;
}
/*
* Convert trace point to probe point with debuginfo
*/
static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
struct perf_probe_point *pp,
bool is_kprobe)
{
struct debuginfo *dinfo = NULL;
u64 stext = 0;
u64 addr = tp->address;
int ret = -ENOENT;
/* convert the address to dwarf address */
if (!is_kprobe) {
if (!addr) {
ret = -EINVAL;
goto error;
}
ret = get_text_start_address(tp->module, &stext, NULL);
if (ret < 0)
goto error;
addr += stext;
} else if (tp->symbol) {
/* If the module is given, this returns a relative address */
ret = kernel_get_symbol_address_by_name(tp->symbol, &addr,
false, !!tp->module);
if (ret != 0)
goto error;
addr += tp->offset;
}
pr_debug("try to find information at %" PRIx64 " in %s\n", addr,
tp->module ? : "kernel");
dinfo = debuginfo_cache__open(tp->module, verbose <= 0);
if (dinfo)
ret = debuginfo__find_probe_point(dinfo, addr, pp);
else
ret = -ENOENT;
if (ret > 0) {
pp->retprobe = tp->retprobe;
return 0;
}
error:
pr_debug("Failed to find corresponding probes from debuginfo.\n");
return ret ? : -ENOENT;
}
/* Adjust symbol name and address */
static int post_process_probe_trace_point(struct probe_trace_point *tp,
struct map *map, u64 offs)
{
struct symbol *sym;
u64 addr = tp->address - offs;
sym = map__find_symbol(map, addr);
if (!sym) {
/*
 * If the address is in the inittext section, the map cannot
 * find it. Ignore it if we are probing an offline kernel.
*/
return (symbol_conf.ignore_vmlinux_buildid) ? 0 : -ENOENT;
}
if (strcmp(sym->name, tp->symbol)) {
/* If we have no realname, use symbol for it */
if (!tp->realname)
tp->realname = tp->symbol;
else
free(tp->symbol);
tp->symbol = strdup(sym->name);
if (!tp->symbol)
return -ENOMEM;
}
tp->offset = addr - sym->start;
tp->address -= offs;
return 0;
}
/*
 * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
 * and generates new symbols with suffixes such as .constprop.N or .isra.N
 * etc. Since those symbols are not recorded in DWARF, we have to find
 * the correct generated symbols in the offline ELF binary.
 * For an online kernel or uprobes we don't need this because those are
 * rebased on _text, or are already section-relative addresses.
*/
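/*
 * Illustrative example (names invented, not from the original source):
 * DWARF may record a function as "foo" while the optimized ELF binary
 * only carries "foo.isra.3"; the post-processing below rewrites the
 * probe symbol accordingly.
 */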
static int
post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *pathname)
{
struct map *map;
u64 stext = 0;
int i, ret = 0;
/* Prepare a map for offline binary */
map = dso__new_map(pathname);
if (!map || get_text_start_address(pathname, &stext, NULL) < 0) {
pr_warning("Failed to get ELF symbols for %s\n", pathname);
return -EINVAL;
}
for (i = 0; i < ntevs; i++) {
ret = post_process_probe_trace_point(&tevs[i].point,
map, stext);
if (ret < 0)
break;
}
map__put(map);
return ret;
}
static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *exec,
struct nsinfo *nsi)
{
int i, ret = 0;
u64 stext = 0;
if (!exec)
return 0;
ret = get_text_start_address(exec, &stext, nsi);
if (ret < 0)
return ret;
for (i = 0; i < ntevs && ret >= 0; i++) {
/* point.address is the address of point.symbol + point.offset */
tevs[i].point.address -= stext;
tevs[i].point.module = strdup(exec);
if (!tevs[i].point.module) {
ret = -ENOMEM;
break;
}
tevs[i].uprobes = true;
}
return ret;
}
static int
post_process_module_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *module,
struct debuginfo *dinfo)
{
Dwarf_Addr text_offs = 0;
int i, ret = 0;
char *mod_name = NULL;
struct map *map;
if (!module)
return 0;
map = get_target_map(module, NULL, false);
if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
pr_warning("Failed to get ELF symbols for %s\n", module);
return -EINVAL;
}
mod_name = find_module_name(module);
for (i = 0; i < ntevs; i++) {
ret = post_process_probe_trace_point(&tevs[i].point,
map, text_offs);
if (ret < 0)
break;
tevs[i].point.module =
strdup(mod_name ? mod_name : module);
if (!tevs[i].point.module) {
ret = -ENOMEM;
break;
}
}
free(mod_name);
map__put(map);
return ret;
}
static int
post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
int ntevs)
{
struct ref_reloc_sym *reloc_sym;
struct map *map;
char *tmp;
int i, skipped = 0;
/* Skip post process if the target is an offline kernel */
if (symbol_conf.ignore_vmlinux_buildid)
return post_process_offline_probe_trace_events(tevs, ntevs,
symbol_conf.vmlinux_name);
reloc_sym = kernel_get_ref_reloc_sym(&map);
if (!reloc_sym) {
pr_warning("Relocated base symbol is not found! "
"Check /proc/sys/kernel/kptr_restrict\n"
"and /proc/sys/kernel/perf_event_paranoid. "
"Or run as privileged perf user.\n\n");
return -EINVAL;
}
for (i = 0; i < ntevs; i++) {
if (!tevs[i].point.address)
continue;
if (tevs[i].point.retprobe && !kretprobe_offset_is_supported())
continue;
/*
 * If we found a wrong one, mark it with a NULL symbol.
 * Since addresses in debuginfo are the same as in objdump, we need
 * to convert them to in-memory addresses.
*/
if (kprobe_warn_out_range(tevs[i].point.symbol,
map__objdump_2mem(map, tevs[i].point.address))) {
tmp = NULL;
skipped++;
} else {
tmp = strdup(reloc_sym->name);
if (!tmp)
return -ENOMEM;
}
/* If we have no realname, use symbol for it */
if (!tevs[i].point.realname)
tevs[i].point.realname = tevs[i].point.symbol;
else
free(tevs[i].point.symbol);
tevs[i].point.symbol = tmp;
tevs[i].point.offset = tevs[i].point.address -
(map__reloc(map) ? reloc_sym->unrelocated_addr :
reloc_sym->addr);
}
return skipped;
}
void __weak
arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unused,
int ntevs __maybe_unused)
{
}
/* Post processing the probe events */
static int post_process_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event *tevs,
int ntevs, const char *module,
bool uprobe, struct debuginfo *dinfo)
{
int ret;
if (uprobe)
ret = add_exec_to_probe_trace_events(tevs, ntevs, module,
pev->nsi);
else if (module)
/* Currently ref_reloc_sym based probe is not for drivers */
ret = post_process_module_probe_trace_events(tevs, ntevs,
module, dinfo);
else
ret = post_process_kernel_probe_trace_events(tevs, ntevs);
if (ret >= 0)
arch__post_process_probe_trace_events(pev, ntevs);
return ret;
}
/* Try to find perf_probe_event with debuginfo */
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs)
{
bool need_dwarf = perf_probe_event_need_dwarf(pev);
struct perf_probe_point tmp;
struct debuginfo *dinfo;
int ntevs, ret = 0;
/* Workaround for gcc bug #98776.
 * Perf fails to add a kretprobe event using the debuginfo of a vmlinux
 * compiled by gcc with -fpatchable-function-entry enabled. The same
 * issue occurs with kernel modules. The retprobe doesn't need debuginfo,
 * so this workaround uses the map to query the probe function address
 * for retprobe events.
*/
if (pev->point.retprobe)
return 0;
dinfo = open_debuginfo(pev->target, pev->nsi, !need_dwarf);
if (!dinfo) {
if (need_dwarf)
return -ENODATA;
pr_debug("Could not open debuginfo. Try to use symbols.\n");
return 0;
}
pr_debug("Try to find probe point from debuginfo.\n");
/* Searching trace events corresponding to a probe event */
ntevs = debuginfo__find_trace_events(dinfo, pev, tevs);
if (ntevs == 0) { /* Not found, retry with an alternative */
ret = get_alternative_probe_event(dinfo, pev, &tmp);
if (!ret) {
ntevs = debuginfo__find_trace_events(dinfo, pev, tevs);
/*
* Write back to the original probe_event for
* setting appropriate (user given) event name
*/
clear_perf_probe_point(&pev->point);
memcpy(&pev->point, &tmp, sizeof(tmp));
}
}
if (ntevs > 0) { /* Succeeded to find trace events */
pr_debug("Found %d probe_trace_events.\n", ntevs);
ret = post_process_probe_trace_events(pev, *tevs, ntevs,
pev->target, pev->uprobes, dinfo);
if (ret < 0 || ret == ntevs) {
pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
clear_probe_trace_events(*tevs, ntevs);
zfree(tevs);
ntevs = 0;
}
}
debuginfo__delete(dinfo);
if (ntevs == 0) { /* No error but failed to find probe point. */
char *probe_point = synthesize_perf_probe_point(&pev->point);
pr_warning("Probe point '%s' not found.\n", probe_point);
free(probe_point);
return -ENODEV;
} else if (ntevs < 0) {
/* Error path : ntevs < 0 */
pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
if (ntevs == -EBADF)
pr_warning("Warning: No dwarf info found in the vmlinux - "
"please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
if (!need_dwarf) {
pr_debug("Trying to use symbols.\n");
return 0;
}
}
return ntevs;
}
#define LINEBUF_SIZE 256
#define NR_ADDITIONAL_LINES 2
static int __show_one_line(FILE *fp, int l, bool skip, bool show_num)
{
char buf[LINEBUF_SIZE], sbuf[STRERR_BUFSIZE];
const char *color = show_num ? "" : PERF_COLOR_BLUE;
const char *prefix = NULL;
do {
if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
goto error;
if (skip)
continue;
if (!prefix) {
prefix = show_num ? "%7d " : " ";
color_fprintf(stdout, color, prefix, l);
}
color_fprintf(stdout, color, "%s", buf);
} while (strchr(buf, '\n') == NULL);
return 1;
error:
if (ferror(fp)) {
pr_warning("File read error: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
return -1;
}
return 0;
}
static int _show_one_line(FILE *fp, int l, bool skip, bool show_num)
{
int rv = __show_one_line(fp, l, skip, show_num);
if (rv == 0) {
pr_warning("Source file is shorter than expected.\n");
rv = -1;
}
return rv;
}
#define show_one_line_with_num(f,l) _show_one_line(f,l,false,true)
#define show_one_line(f,l) _show_one_line(f,l,false,false)
#define skip_one_line(f,l) _show_one_line(f,l,true,false)
#define show_one_line_or_eof(f,l) __show_one_line(f,l,false,false)
/*
 * Showing a line range always requires debuginfo to find the source file
 * and line number.
*/
static int __show_line_range(struct line_range *lr, const char *module,
bool user)
{
struct build_id bid;
int l = 1;
struct int_node *ln;
struct debuginfo *dinfo;
FILE *fp;
int ret;
char *tmp;
char sbuf[STRERR_BUFSIZE];
char sbuild_id[SBUILD_ID_SIZE] = "";
/* Search a line range */
dinfo = open_debuginfo(module, NULL, false);
if (!dinfo)
return -ENOENT;
ret = debuginfo__find_line_range(dinfo, lr);
if (!ret) { /* Not found, retry with an alternative */
ret = get_alternative_line_range(dinfo, lr, module, user);
if (!ret)
ret = debuginfo__find_line_range(dinfo, lr);
}
if (dinfo->build_id) {
build_id__init(&bid, dinfo->build_id, BUILD_ID_SIZE);
build_id__sprintf(&bid, sbuild_id);
}
debuginfo__delete(dinfo);
if (ret == 0 || ret == -ENOENT) {
pr_warning("Specified source line is not found.\n");
return -ENOENT;
} else if (ret < 0) {
pr_warning("Debuginfo analysis failed.\n");
return ret;
}
/* Convert source file path */
tmp = lr->path;
ret = find_source_path(tmp, sbuild_id, lr->comp_dir, &lr->path);
/* Free old path when new path is assigned */
if (tmp != lr->path)
free(tmp);
if (ret < 0) {
pr_warning("Failed to find source file path.\n");
return ret;
}
setup_pager();
if (lr->function)
fprintf(stdout, "<%s@%s:%d>\n", lr->function, lr->path,
lr->start - lr->offset);
else
fprintf(stdout, "<%s:%d>\n", lr->path, lr->start);
fp = fopen(lr->path, "r");
if (fp == NULL) {
pr_warning("Failed to open %s: %s\n", lr->path,
str_error_r(errno, sbuf, sizeof(sbuf)));
return -errno;
}
/* Skip to starting line number */
while (l < lr->start) {
ret = skip_one_line(fp, l++);
if (ret < 0)
goto end;
}
intlist__for_each_entry(ln, lr->line_list) {
for (; ln->i > (unsigned long)l; l++) {
ret = show_one_line(fp, l - lr->offset);
if (ret < 0)
goto end;
}
ret = show_one_line_with_num(fp, l++ - lr->offset);
if (ret < 0)
goto end;
}
if (lr->end == INT_MAX)
lr->end = l + NR_ADDITIONAL_LINES;
while (l <= lr->end) {
ret = show_one_line_or_eof(fp, l++ - lr->offset);
if (ret <= 0)
break;
}
end:
fclose(fp);
return ret;
}
int show_line_range(struct line_range *lr, const char *module,
struct nsinfo *nsi, bool user)
{
int ret;
struct nscookie nsc;
ret = init_probe_symbol_maps(user);
if (ret < 0)
return ret;
nsinfo__mountns_enter(nsi, &nsc);
ret = __show_line_range(lr, module, user);
nsinfo__mountns_exit(&nsc);
exit_probe_symbol_maps();
return ret;
}
static int show_available_vars_at(struct debuginfo *dinfo,
struct perf_probe_event *pev,
struct strfilter *_filter)
{
char *buf;
int ret, i, nvars;
struct str_node *node;
struct variable_list *vls = NULL, *vl;
struct perf_probe_point tmp;
const char *var;
buf = synthesize_perf_probe_point(&pev->point);
if (!buf)
return -EINVAL;
pr_debug("Searching variables at %s\n", buf);
ret = debuginfo__find_available_vars_at(dinfo, pev, &vls);
if (!ret) { /* Not found, retry with an alternative */
ret = get_alternative_probe_event(dinfo, pev, &tmp);
if (!ret) {
ret = debuginfo__find_available_vars_at(dinfo, pev,
&vls);
/* Release the old probe_point */
clear_perf_probe_point(&tmp);
}
}
if (ret <= 0) {
if (ret == 0 || ret == -ENOENT) {
pr_err("Failed to find the address of %s\n", buf);
ret = -ENOENT;
} else
pr_warning("Debuginfo analysis failed.\n");
goto end;
}
/* Some variables are found */
fprintf(stdout, "Available variables at %s\n", buf);
for (i = 0; i < ret; i++) {
vl = &vls[i];
/*
* A probe point might be converted to
* several trace points.
*/
fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
vl->point.offset);
zfree(&vl->point.symbol);
nvars = 0;
if (vl->vars) {
strlist__for_each_entry(node, vl->vars) {
var = strchr(node->s, '\t') + 1;
if (strfilter__compare(_filter, var)) {
fprintf(stdout, "\t\t%s\n", node->s);
nvars++;
}
}
strlist__delete(vl->vars);
}
if (nvars == 0)
fprintf(stdout, "\t\t(No matched variables)\n");
}
free(vls);
end:
free(buf);
return ret;
}
/* Show available variables on given probe point */
int show_available_vars(struct perf_probe_event *pevs, int npevs,
struct strfilter *_filter)
{
int i, ret = 0;
struct debuginfo *dinfo;
ret = init_probe_symbol_maps(pevs->uprobes);
if (ret < 0)
return ret;
dinfo = open_debuginfo(pevs->target, pevs->nsi, false);
if (!dinfo) {
ret = -ENOENT;
goto out;
}
setup_pager();
for (i = 0; i < npevs && ret >= 0; i++)
ret = show_available_vars_at(dinfo, &pevs[i], _filter);
debuginfo__delete(dinfo);
out:
exit_probe_symbol_maps();
return ret;
}
#else /* !HAVE_DWARF_SUPPORT */
static void debuginfo_cache__exit(void)
{
}
static int
find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused,
struct perf_probe_point *pp __maybe_unused,
bool is_kprobe __maybe_unused)
{
return -ENOSYS;
}
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs __maybe_unused)
{
if (perf_probe_event_need_dwarf(pev)) {
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
}
return 0;
}
int show_line_range(struct line_range *lr __maybe_unused,
const char *module __maybe_unused,
struct nsinfo *nsi __maybe_unused,
bool user __maybe_unused)
{
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
}
int show_available_vars(struct perf_probe_event *pevs __maybe_unused,
int npevs __maybe_unused,
struct strfilter *filter __maybe_unused)
{
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
}
#endif
void line_range__clear(struct line_range *lr)
{
zfree(&lr->function);
zfree(&lr->file);
zfree(&lr->path);
zfree(&lr->comp_dir);
intlist__delete(lr->line_list);
}
int line_range__init(struct line_range *lr)
{
memset(lr, 0, sizeof(*lr));
lr->line_list = intlist__new(NULL);
if (!lr->line_list)
return -ENOMEM;
else
return 0;
}
static int parse_line_num(char **ptr, int *val, const char *what)
{
const char *start = *ptr;
errno = 0;
*val = strtol(*ptr, ptr, 0);
if (errno || *ptr == start) {
semantic_error("'%s' is not a valid number.\n", what);
return -EINVAL;
}
return 0;
}
/* Check that the name is valid as an event, group or function name */
static bool is_c_func_name(const char *name)
{
if (!isalpha(*name) && *name != '_')
return false;
while (*++name != '\0') {
if (!isalpha(*name) && !isdigit(*name) && *name != '_')
return false;
}
return true;
}
/*
* Stuff 'lr' according to the line range described by 'arg'.
* The line range syntax is described by:
*
* SRC[:SLN[+NUM|-ELN]]
* FNC[@SRC][:SLN[+NUM|-ELN]]
*/
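/*
 * Illustrative examples (file and function names invented):
 *   "fs/super.c:10+5"   lines 10..14 of fs/super.c (5 lines)
 *   "vfs_read:5-12"     relative lines 5..12 within vfs_read()
 */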
int parse_line_range_desc(const char *arg, struct line_range *lr)
{
char *range, *file, *name = strdup(arg);
int err;
if (!name)
return -ENOMEM;
lr->start = 0;
lr->end = INT_MAX;
range = strchr(name, ':');
if (range) {
*range++ = '\0';
err = parse_line_num(&range, &lr->start, "start line");
if (err)
goto err;
if (*range == '+' || *range == '-') {
const char c = *range++;
err = parse_line_num(&range, &lr->end, "end line");
if (err)
goto err;
if (c == '+') {
lr->end += lr->start;
/*
 * Adjust the number of lines here.
 * If the number of lines == 1, the
 * end line should be equal to
 * the start line.
*/
lr->end--;
}
}
pr_debug("Line range is %d to %d\n", lr->start, lr->end);
err = -EINVAL;
if (lr->start > lr->end) {
semantic_error("Start line must be smaller"
" than end line.\n");
goto err;
}
if (*range != '\0') {
semantic_error("Tailing with invalid str '%s'.\n", range);
goto err;
}
}
file = strchr(name, '@');
if (file) {
*file = '\0';
lr->file = strdup(++file);
if (lr->file == NULL) {
err = -ENOMEM;
goto err;
}
lr->function = name;
} else if (strchr(name, '/') || strchr(name, '.'))
lr->file = name;
else if (is_c_func_name(name)) /* We reuse it for checking funcname */
lr->function = name;
else { /* Invalid name */
semantic_error("'%s' is not a valid function name.\n", name);
err = -EINVAL;
goto err;
}
return 0;
err:
free(name);
return err;
}
static int parse_perf_probe_event_name(char **arg, struct perf_probe_event *pev)
{
char *ptr;
ptr = strpbrk_esc(*arg, ":");
if (ptr) {
*ptr = '\0';
if (!pev->sdt && !is_c_func_name(*arg))
goto ng_name;
pev->group = strdup_esc(*arg);
if (!pev->group)
return -ENOMEM;
*arg = ptr + 1;
} else
pev->group = NULL;
pev->event = strdup_esc(*arg);
if (pev->event == NULL)
return -ENOMEM;
if (!pev->sdt && !is_c_func_name(pev->event)) {
zfree(&pev->event);
ng_name:
zfree(&pev->group);
semantic_error("%s is bad for event name -it must "
"follow C symbol-naming rule.\n", *arg);
return -EINVAL;
}
return 0;
}
/* Parse probepoint definition. */
static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
{
struct perf_probe_point *pp = &pev->point;
char *ptr, *tmp;
char c, nc = 0;
bool file_spec = false;
int ret;
/*
* <Syntax>
* perf probe [GRP:][EVENT=]SRC[:LN|;PTN]
* perf probe [GRP:][EVENT=]FUNC[@SRC][+OFFS|%return|:LN|;PAT]
* perf probe %[GRP:]SDT_EVENT
*/
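/*
 * Illustrative examples (identifiers invented for illustration):
 *   "do_sys_open+8"              function plus byte offset
 *   "myopen=do_sys_open%return"  user-named return probe
 *   "%sdt_libc:setjmp"           SDT event
 */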
if (!arg)
return -EINVAL;
if (is_sdt_event(arg)) {
pev->sdt = true;
if (arg[0] == '%')
arg++;
}
ptr = strpbrk_esc(arg, ";=@+%");
if (pev->sdt) {
if (ptr) {
if (*ptr != '@') {
semantic_error("%s must be an SDT name.\n",
arg);
return -EINVAL;
}
/* This must be a target file name or build id */
tmp = build_id_cache__complement(ptr + 1);
if (tmp) {
pev->target = build_id_cache__origname(tmp);
free(tmp);
} else
pev->target = strdup_esc(ptr + 1);
if (!pev->target)
return -ENOMEM;
*ptr = '\0';
}
ret = parse_perf_probe_event_name(&arg, pev);
if (ret == 0) {
if (asprintf(&pev->point.function, "%%%s", pev->event) < 0)
ret = -errno;
}
return ret;
}
if (ptr && *ptr == '=') { /* Event name */
*ptr = '\0';
tmp = ptr + 1;
ret = parse_perf_probe_event_name(&arg, pev);
if (ret < 0)
return ret;
arg = tmp;
}
/*
* Check arg is function or file name and copy it.
*
* We consider arg to be a file spec if and only if it satisfies
 * all of the below criteria:
* - it does not include any of "+@%",
* - it includes one of ":;", and
* - it has a period '.' in the name.
*
* Otherwise, we consider arg to be a function specification.
*/
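/*
 * For example (illustrative): "foo.c:10" is treated as a file spec,
 * while "foo:10" (no '.') is treated as a function spec.
 */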
if (!strpbrk_esc(arg, "+@%")) {
ptr = strpbrk_esc(arg, ";:");
/* This is a file spec if it includes a '.' before ; or : */
if (ptr && memchr(arg, '.', ptr - arg))
file_spec = true;
}
ptr = strpbrk_esc(arg, ";:+@%");
if (ptr) {
nc = *ptr;
*ptr++ = '\0';
}
if (arg[0] == '\0')
tmp = NULL;
else {
tmp = strdup_esc(arg);
if (tmp == NULL)
return -ENOMEM;
}
if (file_spec)
pp->file = tmp;
else {
pp->function = tmp;
/*
 * Keep pp->function even if this is an absolute address,
 * so it can mark whether abs_address is valid.
 * This makes 'perf probe lib.bin 0x0' possible.
 *
 * Note that checking the length of tmp is not needed
 * because when we access tmp[1] we know tmp[0] is '0',
 * so tmp[1] is always valid (but could be '\0').
*/
if (tmp && !strncmp(tmp, "0x", 2)) {
pp->abs_address = strtoull(pp->function, &tmp, 0);
if (*tmp != '\0') {
semantic_error("Invalid absolute address.\n");
return -EINVAL;
}
}
}
/* Parse other options */
while (ptr) {
arg = ptr;
c = nc;
if (c == ';') { /* Lazy pattern must be the last part */
pp->lazy_line = strdup(arg); /* leave escapes as-is */
if (pp->lazy_line == NULL)
return -ENOMEM;
break;
}
ptr = strpbrk_esc(arg, ";:+@%");
if (ptr) {
nc = *ptr;
*ptr++ = '\0';
}
switch (c) {
case ':': /* Line number */
pp->line = strtoul(arg, &tmp, 0);
if (*tmp != '\0') {
semantic_error("There is non-digit char"
" in line number.\n");
return -EINVAL;
}
break;
case '+': /* Byte offset from a symbol */
pp->offset = strtoul(arg, &tmp, 0);
if (*tmp != '\0') {
semantic_error("There is non-digit character"
" in offset.\n");
return -EINVAL;
}
break;
case '@': /* File name */
if (pp->file) {
semantic_error("SRC@SRC is not allowed.\n");
return -EINVAL;
}
pp->file = strdup_esc(arg);
if (pp->file == NULL)
return -ENOMEM;
break;
case '%': /* Probe places */
if (strcmp(arg, "return") == 0) {
pp->retprobe = 1;
} else { /* Others not supported yet */
semantic_error("%%%s is not supported.\n", arg);
return -ENOTSUP;
}
break;
default: /* Buggy case */
pr_err("This program has a bug at %s:%d.\n",
__FILE__, __LINE__);
return -ENOTSUP;
break;
}
}
/* Exclusion check */
if (pp->lazy_line && pp->line) {
semantic_error("Lazy pattern can't be used with"
" line number.\n");
return -EINVAL;
}
if (pp->lazy_line && pp->offset) {
semantic_error("Lazy pattern can't be used with offset.\n");
return -EINVAL;
}
if (pp->line && pp->offset) {
semantic_error("Offset can't be used with line number.\n");
return -EINVAL;
}
if (!pp->line && !pp->lazy_line && pp->file && !pp->function) {
semantic_error("File always requires line number or "
"lazy pattern.\n");
return -EINVAL;
}
if (pp->offset && !pp->function) {
semantic_error("Offset requires an entry function.\n");
return -EINVAL;
}
if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) {
semantic_error("Offset/Line/Lazy pattern can't be used with "
"return probe.\n");
return -EINVAL;
}
pr_debug("symbol:%s file:%s line:%d offset:%lu return:%d lazy:%s\n",
pp->function, pp->file, pp->line, pp->offset, pp->retprobe,
pp->lazy_line);
return 0;
}
/* Parse perf-probe event argument */
static int parse_perf_probe_arg(char *str, struct perf_probe_arg *arg)
{
char *tmp, *goodname;
struct perf_probe_arg_field **fieldp;
pr_debug("parsing arg: %s into ", str);
tmp = strchr(str, '=');
if (tmp) {
arg->name = strndup(str, tmp - str);
if (arg->name == NULL)
return -ENOMEM;
pr_debug("name:%s ", arg->name);
str = tmp + 1;
}
tmp = strchr(str, '@');
if (tmp && tmp != str && !strcmp(tmp + 1, "user")) { /* user attr */
if (!user_access_is_supported()) {
semantic_error("ftrace does not support user access\n");
return -EINVAL;
}
*tmp = '\0';
arg->user_access = true;
pr_debug("user_access ");
}
tmp = strchr(str, ':');
if (tmp) { /* Type setting */
*tmp = '\0';
arg->type = strdup(tmp + 1);
if (arg->type == NULL)
return -ENOMEM;
pr_debug("type:%s ", arg->type);
}
tmp = strpbrk(str, "-.[");
if (!is_c_varname(str) || !tmp) {
/* A variable, register, symbol or special value */
arg->var = strdup(str);
if (arg->var == NULL)
return -ENOMEM;
pr_debug("%s\n", arg->var);
return 0;
}
/* Structure fields or array element */
arg->var = strndup(str, tmp - str);
if (arg->var == NULL)
return -ENOMEM;
goodname = arg->var;
pr_debug("%s, ", arg->var);
fieldp = &arg->field;
do {
*fieldp = zalloc(sizeof(struct perf_probe_arg_field));
if (*fieldp == NULL)
return -ENOMEM;
if (*tmp == '[') { /* Array */
str = tmp;
(*fieldp)->index = strtol(str + 1, &tmp, 0);
(*fieldp)->ref = true;
if (*tmp != ']' || tmp == str + 1) {
semantic_error("Array index must be a"
" number.\n");
return -EINVAL;
}
tmp++;
if (*tmp == '\0')
tmp = NULL;
} else { /* Structure */
if (*tmp == '.') {
str = tmp + 1;
(*fieldp)->ref = false;
} else if (tmp[1] == '>') {
str = tmp + 2;
(*fieldp)->ref = true;
} else {
semantic_error("Argument parse error: %s\n",
str);
return -EINVAL;
}
tmp = strpbrk(str, "-.[");
}
if (tmp) {
(*fieldp)->name = strndup(str, tmp - str);
if ((*fieldp)->name == NULL)
return -ENOMEM;
if (*str != '[')
goodname = (*fieldp)->name;
pr_debug("%s(%d), ", (*fieldp)->name, (*fieldp)->ref);
fieldp = &(*fieldp)->next;
}
} while (tmp);
(*fieldp)->name = strdup(str);
if ((*fieldp)->name == NULL)
return -ENOMEM;
if (*str != '[')
goodname = (*fieldp)->name;
pr_debug("%s(%d)\n", (*fieldp)->name, (*fieldp)->ref);
/* If no name is specified, set the last field name (not array index) */
if (!arg->name) {
arg->name = strdup(goodname);
if (arg->name == NULL)
return -ENOMEM;
}
return 0;
}
/* Parse perf-probe event command */
int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev)
{
char **argv;
int argc, i, ret = 0;
argv = argv_split(cmd, &argc);
if (!argv) {
pr_debug("Failed to split arguments.\n");
return -ENOMEM;
}
if (argc - 1 > MAX_PROBE_ARGS) {
semantic_error("Too many probe arguments (%d).\n", argc - 1);
ret = -ERANGE;
goto out;
}
/* Parse probe point */
ret = parse_perf_probe_point(argv[0], pev);
if (ret < 0)
goto out;
/* Generate event name if needed */
if (!pev->event && pev->point.function && pev->point.line
&& !pev->point.lazy_line && !pev->point.offset) {
if (asprintf(&pev->event, "%s_L%d", pev->point.function,
pev->point.line) < 0) {
ret = -ENOMEM;
goto out;
}
}
/* Copy arguments and ensure return probe has no C argument */
pev->nargs = argc - 1;
pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
if (pev->args == NULL) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < pev->nargs && ret >= 0; i++) {
ret = parse_perf_probe_arg(argv[i + 1], &pev->args[i]);
if (ret >= 0 &&
is_c_varname(pev->args[i].var) && pev->point.retprobe) {
semantic_error("You can't specify local variable for"
" kretprobe.\n");
ret = -EINVAL;
}
}
out:
argv_free(argv);
return ret;
}
/* Returns true if *any* ARG is a C variable, $params or $vars. */
bool perf_probe_with_var(struct perf_probe_event *pev)
{
int i = 0;
for (i = 0; i < pev->nargs; i++)
if (is_c_varname(pev->args[i].var) ||
!strcmp(pev->args[i].var, PROBE_ARG_PARAMS) ||
!strcmp(pev->args[i].var, PROBE_ARG_VARS))
return true;
return false;
}
/* Return true if this perf_probe_event requires debuginfo */
bool perf_probe_event_need_dwarf(struct perf_probe_event *pev)
{
if (pev->point.file || pev->point.line || pev->point.lazy_line)
return true;
if (perf_probe_with_var(pev))
return true;
return false;
}
/* Parse probe_events event into struct probe_point */
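/*
 * Typical input lines (illustrative, identifiers invented):
 *   "p:probe/vfs_read vfs_read+0 count=%di"                     (kprobe)
 *   "p:probe_libc/malloc /usr/lib64/libc.so.6:0x84130 size=%di" (uprobe)
 */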
int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
{
struct probe_trace_point *tp = &tev->point;
char pr;
char *p;
char *argv0_str = NULL, *fmt, *fmt1_str, *fmt2_str, *fmt3_str;
int ret, i, argc;
char **argv;
pr_debug("Parsing probe_events: %s\n", cmd);
argv = argv_split(cmd, &argc);
if (!argv) {
pr_debug("Failed to split arguments.\n");
return -ENOMEM;
}
if (argc < 2) {
semantic_error("Too few probe arguments.\n");
ret = -ERANGE;
goto out;
}
/* Scan event and group name. */
argv0_str = strdup(argv[0]);
if (argv0_str == NULL) {
ret = -ENOMEM;
goto out;
}
fmt1_str = strtok_r(argv0_str, ":", &fmt);
fmt2_str = strtok_r(NULL, "/", &fmt);
fmt3_str = strtok_r(NULL, " \t", &fmt);
if (fmt1_str == NULL || fmt2_str == NULL || fmt3_str == NULL) {
semantic_error("Failed to parse event name: %s\n", argv[0]);
ret = -EINVAL;
goto out;
}
pr = fmt1_str[0];
tev->group = strdup(fmt2_str);
tev->event = strdup(fmt3_str);
if (tev->group == NULL || tev->event == NULL) {
ret = -ENOMEM;
goto out;
}
pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr);
tp->retprobe = (pr == 'r');
/* Scan module name(if there), function name and offset */
p = strchr(argv[1], ':');
if (p) {
tp->module = strndup(argv[1], p - argv[1]);
if (!tp->module) {
ret = -ENOMEM;
goto out;
}
tev->uprobes = (tp->module[0] == '/');
p++;
} else
p = argv[1];
fmt1_str = strtok_r(p, "+", &fmt);
/* Only an address starts with 0x */
if (fmt1_str[0] == '0') {
/*
* Fix a special case:
* if address == 0, kernel reports something like:
* p:probe_libc/abs_0 /lib/libc-2.18.so:0x (null) arg1=%ax
 * Newer kernels may fix this, but we also want to
 * support older kernels.
*/
if (strcmp(fmt1_str, "0x") == 0) {
if (!argv[2] || strcmp(argv[2], "(null)")) {
ret = -EINVAL;
goto out;
}
tp->address = 0;
free(argv[2]);
for (i = 2; argv[i + 1] != NULL; i++)
argv[i] = argv[i + 1];
argv[i] = NULL;
argc -= 1;
} else
tp->address = strtoull(fmt1_str, NULL, 0);
} else {
/* Only a symbol-based probe has an offset */
tp->symbol = strdup(fmt1_str);
if (tp->symbol == NULL) {
ret = -ENOMEM;
goto out;
}
fmt2_str = strtok_r(NULL, "", &fmt);
if (fmt2_str == NULL)
tp->offset = 0;
else
tp->offset = strtoul(fmt2_str, NULL, 10);
}
if (tev->uprobes) {
fmt2_str = strchr(p, '(');
if (fmt2_str)
tp->ref_ctr_offset = strtoul(fmt2_str + 1, NULL, 0);
}
tev->nargs = argc - 2;
tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
if (tev->args == NULL) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < tev->nargs; i++) {
p = strchr(argv[i + 2], '=');
if (p) /* We don't care which register is assigned. */
*p++ = '\0';
else
p = argv[i + 2];
tev->args[i].name = strdup(argv[i + 2]);
/* TODO: parse regs and offset */
tev->args[i].value = strdup(p);
if (tev->args[i].name == NULL || tev->args[i].value == NULL) {
ret = -ENOMEM;
goto out;
}
}
ret = 0;
out:
free(argv0_str);
argv_free(argv);
return ret;
}
/* Compose only probe arg */
char *synthesize_perf_probe_arg(struct perf_probe_arg *pa)
{
struct perf_probe_arg_field *field = pa->field;
struct strbuf buf;
char *ret = NULL;
int err;
if (strbuf_init(&buf, 64) < 0)
return NULL;
if (pa->name && pa->var)
err = strbuf_addf(&buf, "%s=%s", pa->name, pa->var);
else
err = strbuf_addstr(&buf, pa->name ?: pa->var);
if (err)
goto out;
while (field) {
if (field->name[0] == '[')
err = strbuf_addstr(&buf, field->name);
else
err = strbuf_addf(&buf, "%s%s", field->ref ? "->" : ".",
field->name);
field = field->next;
if (err)
goto out;
}
if (pa->type)
if (strbuf_addf(&buf, ":%s", pa->type) < 0)
goto out;
ret = strbuf_detach(&buf, NULL);
out:
strbuf_release(&buf);
return ret;
}
/* Compose only probe point (not argument) */
static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
{
struct strbuf buf;
char *tmp, *ret = NULL;
int len, err = 0;
if (strbuf_init(&buf, 64) < 0)
return NULL;
if (pp->function) {
if (strbuf_addstr(&buf, pp->function) < 0)
goto out;
if (pp->offset)
err = strbuf_addf(&buf, "+%lu", pp->offset);
else if (pp->line)
err = strbuf_addf(&buf, ":%d", pp->line);
else if (pp->retprobe)
err = strbuf_addstr(&buf, "%return");
if (err)
goto out;
}
if (pp->file) {
tmp = pp->file;
len = strlen(tmp);
if (len > 30) {
tmp = strchr(pp->file + len - 30, '/');
tmp = tmp ? tmp + 1 : pp->file + len - 30;
}
err = strbuf_addf(&buf, "@%s", tmp);
if (!err && !pp->function && pp->line)
err = strbuf_addf(&buf, ":%d", pp->line);
}
if (!err)
ret = strbuf_detach(&buf, NULL);
out:
strbuf_release(&buf);
return ret;
}
char *synthesize_perf_probe_command(struct perf_probe_event *pev)
{
struct strbuf buf;
char *tmp, *ret = NULL;
int i;
if (strbuf_init(&buf, 64))
return NULL;
if (pev->event)
if (strbuf_addf(&buf, "%s:%s=", pev->group ?: PERFPROBE_GROUP,
pev->event) < 0)
goto out;
tmp = synthesize_perf_probe_point(&pev->point);
if (!tmp || strbuf_addstr(&buf, tmp) < 0) {
free(tmp);
goto out;
}
free(tmp);
for (i = 0; i < pev->nargs; i++) {
tmp = synthesize_perf_probe_arg(pev->args + i);
if (!tmp || strbuf_addf(&buf, " %s", tmp) < 0) {
free(tmp);
goto out;
}
free(tmp);
}
ret = strbuf_detach(&buf, NULL);
out:
strbuf_release(&buf);
return ret;
}
static int __synthesize_probe_trace_arg_ref(struct probe_trace_arg_ref *ref,
struct strbuf *buf, int depth)
{
int err;
if (ref->next) {
depth = __synthesize_probe_trace_arg_ref(ref->next, buf,
depth + 1);
if (depth < 0)
return depth;
}
if (ref->user_access)
err = strbuf_addf(buf, "%s%ld(", "+u", ref->offset);
else
err = strbuf_addf(buf, "%+ld(", ref->offset);
return (err < 0) ? err : depth;
}
static int synthesize_probe_trace_arg(struct probe_trace_arg *arg,
struct strbuf *buf)
{
struct probe_trace_arg_ref *ref = arg->ref;
int depth = 0, err;
/* Argument name or separator */
if (arg->name)
err = strbuf_addf(buf, " %s=", arg->name);
else
err = strbuf_addch(buf, ' ');
if (err)
return err;
/* Special case: @XXX */
if (arg->value[0] == '@' && arg->ref)
ref = ref->next;
/* Dereferencing arguments */
if (ref) {
depth = __synthesize_probe_trace_arg_ref(ref, buf, 1);
if (depth < 0)
return depth;
}
/* Print argument value */
if (arg->value[0] == '@' && arg->ref)
err = strbuf_addf(buf, "%s%+ld", arg->value, arg->ref->offset);
else
err = strbuf_addstr(buf, arg->value);
/* Closing */
while (!err && depth--)
err = strbuf_addch(buf, ')');
/* Print argument type */
if (!err && arg->type)
err = strbuf_addf(buf, ":%s", arg->type);
return err;
}
static int
synthesize_probe_trace_args(struct probe_trace_event *tev, struct strbuf *buf)
{
int i, ret = 0;
for (i = 0; i < tev->nargs && ret >= 0; i++)
ret = synthesize_probe_trace_arg(&tev->args[i], buf);
return ret;
}
static int
synthesize_uprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf)
{
int err;
/* Uprobes must have tp->module */
if (!tp->module)
return -EINVAL;
/*
 * If tp->address == 0, then this point must be an
 * absolute address uprobe.
 * try_to_find_absolute_address() should have set
 * tp->symbol to "0x0".
*/
if (!tp->address && (!tp->symbol || strcmp(tp->symbol, "0x0")))
return -EINVAL;
/* Use the tp->address for uprobes */
err = strbuf_addf(buf, "%s:0x%" PRIx64, tp->module, tp->address);
if (err >= 0 && tp->ref_ctr_offset) {
if (!uprobe_ref_ctr_is_supported())
return -EINVAL;
err = strbuf_addf(buf, "(0x%lx)", tp->ref_ctr_offset);
}
return err >= 0 ? 0 : err;
}
static int
synthesize_kprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf)
{
if (!strncmp(tp->symbol, "0x", 2)) {
/* Absolute address. See try_to_find_absolute_address() */
return strbuf_addf(buf, "%s%s0x%" PRIx64, tp->module ?: "",
tp->module ? ":" : "", tp->address);
} else {
return strbuf_addf(buf, "%s%s%s+%lu", tp->module ?: "",
tp->module ? ":" : "", tp->symbol, tp->offset);
}
}
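/*
 * For illustration (values invented): the helpers above produce
 * definitions such as "vfs_read+0" or "nfs:nfs_file_read+0" for kprobes,
 * and "/usr/lib64/libc.so.6:0x84130(0x1d8c0)" for uprobes with a
 * reference counter offset.
 */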
char *synthesize_probe_trace_command(struct probe_trace_event *tev)
{
struct probe_trace_point *tp = &tev->point;
struct strbuf buf;
char *ret = NULL;
int err;
if (strbuf_init(&buf, 32) < 0)
return NULL;
if (strbuf_addf(&buf, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
tev->group, tev->event) < 0)
goto error;
if (tev->uprobes)
err = synthesize_uprobe_trace_def(tp, &buf);
else
err = synthesize_kprobe_trace_def(tp, &buf);
if (err >= 0)
err = synthesize_probe_trace_args(tev, &buf);
if (err >= 0)
ret = strbuf_detach(&buf, NULL);
error:
strbuf_release(&buf);
return ret;
}
static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
struct perf_probe_point *pp,
bool is_kprobe)
{
struct symbol *sym = NULL;
struct map *map = NULL;
u64 addr = tp->address;
int ret = -ENOENT;
if (!is_kprobe) {
map = dso__new_map(tp->module);
if (!map)
goto out;
sym = map__find_symbol(map, addr);
} else {
if (tp->symbol && !addr) {
if (kernel_get_symbol_address_by_name(tp->symbol,
&addr, true, false) < 0)
goto out;
}
if (addr) {
addr += tp->offset;
sym = machine__find_kernel_symbol(host_machine, addr, &map);
}
}
if (!sym)
goto out;
pp->retprobe = tp->retprobe;
pp->offset = addr - map__unmap_ip(map, sym->start);
pp->function = strdup(sym->name);
ret = pp->function ? 0 : -ENOMEM;
out:
if (map && !is_kprobe) {
map__put(map);
}
return ret;
}
static int convert_to_perf_probe_point(struct probe_trace_point *tp,
struct perf_probe_point *pp,
bool is_kprobe)
{
char buf[128];
int ret;
ret = find_perf_probe_point_from_dwarf(tp, pp, is_kprobe);
if (!ret)
return 0;
ret = find_perf_probe_point_from_map(tp, pp, is_kprobe);
if (!ret)
return 0;
pr_debug("Failed to find probe point from both of dwarf and map.\n");
if (tp->symbol) {
pp->function = strdup(tp->symbol);
pp->offset = tp->offset;
} else {
ret = e_snprintf(buf, 128, "0x%" PRIx64, tp->address);
if (ret < 0)
return ret;
pp->function = strdup(buf);
pp->offset = 0;
}
if (pp->function == NULL)
return -ENOMEM;
pp->retprobe = tp->retprobe;
return 0;
}
static int convert_to_perf_probe_event(struct probe_trace_event *tev,
struct perf_probe_event *pev, bool is_kprobe)
{
struct strbuf buf = STRBUF_INIT;
int i, ret;
/* Convert event/group name */
pev->event = strdup(tev->event);
pev->group = strdup(tev->group);
if (pev->event == NULL || pev->group == NULL)
return -ENOMEM;
/* Convert trace_point to probe_point */
ret = convert_to_perf_probe_point(&tev->point, &pev->point, is_kprobe);
if (ret < 0)
return ret;
/* Convert trace_arg to probe_arg */
pev->nargs = tev->nargs;
pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
if (pev->args == NULL)
return -ENOMEM;
for (i = 0; i < tev->nargs && ret >= 0; i++) {
if (tev->args[i].name)
pev->args[i].name = strdup(tev->args[i].name);
else {
if ((ret = strbuf_init(&buf, 32)) < 0)
goto error;
ret = synthesize_probe_trace_arg(&tev->args[i], &buf);
pev->args[i].name = strbuf_detach(&buf, NULL);
}
if (pev->args[i].name == NULL && ret >= 0)
ret = -ENOMEM;
}
error:
if (ret < 0)
clear_perf_probe_event(pev);
return ret;
}
void clear_perf_probe_event(struct perf_probe_event *pev)
{
struct perf_probe_arg_field *field, *next;
int i;
zfree(&pev->event);
zfree(&pev->group);
zfree(&pev->target);
clear_perf_probe_point(&pev->point);
for (i = 0; i < pev->nargs; i++) {
zfree(&pev->args[i].name);
zfree(&pev->args[i].var);
zfree(&pev->args[i].type);
field = pev->args[i].field;
while (field) {
next = field->next;
zfree(&field->name);
free(field);
field = next;
}
}
pev->nargs = 0;
zfree(&pev->args);
}
#define strdup_or_goto(str, label) \
({ char *__p = NULL; if (str && !(__p = strdup(str))) goto label; __p; })
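/*
 * Illustrative usage (as in perf_probe_point__copy() below):
 *   dst->file = strdup_or_goto(src->file, out_err);
 * __p stays NULL when 'str' is NULL; the goto fires only when strdup()
 * itself fails.
 */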
static int perf_probe_point__copy(struct perf_probe_point *dst,
struct perf_probe_point *src)
{
dst->file = strdup_or_goto(src->file, out_err);
dst->function = strdup_or_goto(src->function, out_err);
dst->lazy_line = strdup_or_goto(src->lazy_line, out_err);
dst->line = src->line;
dst->retprobe = src->retprobe;
dst->offset = src->offset;
return 0;
out_err:
clear_perf_probe_point(dst);
return -ENOMEM;
}
static int perf_probe_arg__copy(struct perf_probe_arg *dst,
struct perf_probe_arg *src)
{
struct perf_probe_arg_field *field, **ppfield;
dst->name = strdup_or_goto(src->name, out_err);
dst->var = strdup_or_goto(src->var, out_err);
dst->type = strdup_or_goto(src->type, out_err);
field = src->field;
ppfield = &(dst->field);
while (field) {
*ppfield = zalloc(sizeof(*field));
if (!*ppfield)
goto out_err;
(*ppfield)->name = strdup_or_goto(field->name, out_err);
(*ppfield)->index = field->index;
(*ppfield)->ref = field->ref;
field = field->next;
ppfield = &((*ppfield)->next);
}
return 0;
out_err:
return -ENOMEM;
}
int perf_probe_event__copy(struct perf_probe_event *dst,
struct perf_probe_event *src)
{
int i;
dst->event = strdup_or_goto(src->event, out_err);
dst->group = strdup_or_goto(src->group, out_err);
dst->target = strdup_or_goto(src->target, out_err);
dst->uprobes = src->uprobes;
if (perf_probe_point__copy(&dst->point, &src->point) < 0)
goto out_err;
dst->args = zalloc(sizeof(struct perf_probe_arg) * src->nargs);
if (!dst->args)
goto out_err;
dst->nargs = src->nargs;
for (i = 0; i < src->nargs; i++)
if (perf_probe_arg__copy(&dst->args[i], &src->args[i]) < 0)
goto out_err;
return 0;
out_err:
clear_perf_probe_event(dst);
return -ENOMEM;
}
void clear_probe_trace_event(struct probe_trace_event *tev)
{
struct probe_trace_arg_ref *ref, *next;
int i;
zfree(&tev->event);
zfree(&tev->group);
zfree(&tev->point.symbol);
zfree(&tev->point.realname);
zfree(&tev->point.module);
for (i = 0; i < tev->nargs; i++) {
zfree(&tev->args[i].name);
zfree(&tev->args[i].value);
zfree(&tev->args[i].type);
ref = tev->args[i].ref;
while (ref) {
next = ref->next;
free(ref);
ref = next;
}
}
zfree(&tev->args);
tev->nargs = 0;
}
struct kprobe_blacklist_node {
struct list_head list;
u64 start;
u64 end;
char *symbol;
};
static void kprobe_blacklist__delete(struct list_head *blacklist)
{
struct kprobe_blacklist_node *node;
while (!list_empty(blacklist)) {
node = list_first_entry(blacklist,
struct kprobe_blacklist_node, list);
list_del_init(&node->list);
zfree(&node->symbol);
free(node);
}
}
static int kprobe_blacklist__load(struct list_head *blacklist)
{
struct kprobe_blacklist_node *node;
const char *__debugfs = debugfs__mountpoint();
char buf[PATH_MAX], *p;
FILE *fp;
int ret;
if (__debugfs == NULL)
return -ENOTSUP;
ret = e_snprintf(buf, PATH_MAX, "%s/kprobes/blacklist", __debugfs);
if (ret < 0)
return ret;
fp = fopen(buf, "r");
if (!fp)
return -errno;
ret = 0;
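/*
 * Each blacklist line is expected to look roughly like (illustrative):
 *   0xffffffff81027000-0xffffffff81027030<TAB>do_int3
 */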
while (fgets(buf, PATH_MAX, fp)) {
node = zalloc(sizeof(*node));
if (!node) {
ret = -ENOMEM;
break;
}
INIT_LIST_HEAD(&node->list);
list_add_tail(&node->list, blacklist);
if (sscanf(buf, "0x%" PRIx64 "-0x%" PRIx64, &node->start, &node->end) != 2) {
ret = -EINVAL;
break;
}
p = strchr(buf, '\t');
if (p) {
p++;
if (p[strlen(p) - 1] == '\n')
p[strlen(p) - 1] = '\0';
} else
p = (char *)"unknown";
node->symbol = strdup(p);
if (!node->symbol) {
ret = -ENOMEM;
break;
}
pr_debug2("Blacklist: 0x%" PRIx64 "-0x%" PRIx64 ", %s\n",
node->start, node->end, node->symbol);
ret++;
}
if (ret < 0)
kprobe_blacklist__delete(blacklist);
fclose(fp);
return ret;
}
static struct kprobe_blacklist_node *
kprobe_blacklist__find_by_address(struct list_head *blacklist, u64 address)
{
struct kprobe_blacklist_node *node;
list_for_each_entry(node, blacklist, list) {
if (node->start <= address && address < node->end)
return node;
}
return NULL;
}
static LIST_HEAD(kprobe_blacklist);
static void kprobe_blacklist__init(void)
{
if (!list_empty(&kprobe_blacklist))
return;
if (kprobe_blacklist__load(&kprobe_blacklist) < 0)
pr_debug("No kprobe blacklist support, ignored\n");
}
static void kprobe_blacklist__release(void)
{
kprobe_blacklist__delete(&kprobe_blacklist);
}
static bool kprobe_blacklist__listed(u64 address)
{
return !!kprobe_blacklist__find_by_address(&kprobe_blacklist, address);
}
static int perf_probe_event__sprintf(const char *group, const char *event,
struct perf_probe_event *pev,
const char *module,
struct strbuf *result)
{
int i, ret;
char *buf;
if (asprintf(&buf, "%s:%s", group, event) < 0)
return -errno;
ret = strbuf_addf(result, " %-20s (on ", buf);
free(buf);
if (ret)
return ret;
/* Synthesize only event probe point */
buf = synthesize_perf_probe_point(&pev->point);
if (!buf)
return -ENOMEM;
ret = strbuf_addstr(result, buf);
free(buf);
if (!ret && module)
ret = strbuf_addf(result, " in %s", module);
if (!ret && pev->nargs > 0) {
ret = strbuf_add(result, " with", 5);
for (i = 0; !ret && i < pev->nargs; i++) {
buf = synthesize_perf_probe_arg(&pev->args[i]);
if (!buf)
return -ENOMEM;
ret = strbuf_addf(result, " %s", buf);
free(buf);
}
}
if (!ret)
ret = strbuf_addch(result, ')');
return ret;
}
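/*
 * The synthesized line looks roughly like (illustrative, names invented):
 *   probe_libc:malloc    (on malloc in /usr/lib64/libc.so.6 with size)
 */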
/* Show an event */
int show_perf_probe_event(const char *group, const char *event,
struct perf_probe_event *pev,
const char *module, bool use_stdout)
{
struct strbuf buf = STRBUF_INIT;
int ret;
ret = perf_probe_event__sprintf(group, event, pev, module, &buf);
if (ret >= 0) {
if (use_stdout)
printf("%s\n", buf.buf);
else
pr_info("%s\n", buf.buf);
}
strbuf_release(&buf);
return ret;
}
static bool filter_probe_trace_event(struct probe_trace_event *tev,
struct strfilter *filter)
{
char tmp[128];
/* At first, check the event name itself */
if (strfilter__compare(filter, tev->event))
return true;
/* Next, check the combination of name and group */
if (e_snprintf(tmp, 128, "%s:%s", tev->group, tev->event) < 0)
return false;
return strfilter__compare(filter, tmp);
}
static int __show_perf_probe_events(int fd, bool is_kprobe,
struct strfilter *filter)
{
int ret = 0;
struct probe_trace_event tev;
struct perf_probe_event pev;
struct strlist *rawlist;
struct str_node *ent;
memset(&tev, 0, sizeof(tev));
memset(&pev, 0, sizeof(pev));
rawlist = probe_file__get_rawlist(fd);
if (!rawlist)
return -ENOMEM;
strlist__for_each_entry(ent, rawlist) {
ret = parse_probe_trace_command(ent->s, &tev);
if (ret >= 0) {
if (!filter_probe_trace_event(&tev, filter))
goto next;
ret = convert_to_perf_probe_event(&tev, &pev,
is_kprobe);
if (ret < 0)
goto next;
ret = show_perf_probe_event(pev.group, pev.event,
&pev, tev.point.module,
true);
}
next:
clear_perf_probe_event(&pev);
clear_probe_trace_event(&tev);
if (ret < 0)
break;
}
strlist__delete(rawlist);
/* Cleanup cached debuginfo if needed */
debuginfo_cache__exit();
return ret;
}
/* List up current perf-probe events */
int show_perf_probe_events(struct strfilter *filter)
{
int kp_fd, up_fd, ret;
setup_pager();
if (probe_conf.cache)
return probe_cache__show_all_caches(filter);
ret = init_probe_symbol_maps(false);
if (ret < 0)
return ret;
ret = probe_file__open_both(&kp_fd, &up_fd, 0);
if (ret < 0)
return ret;
if (kp_fd >= 0)
ret = __show_perf_probe_events(kp_fd, true, filter);
if (up_fd >= 0 && ret >= 0)
ret = __show_perf_probe_events(up_fd, false, filter);
if (kp_fd > 0)
close(kp_fd);
if (up_fd > 0)
close(up_fd);
exit_probe_symbol_maps();
return ret;
}
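/*
 * Illustrative behavior of get_new_event_name() (names invented): a base
 * of "vfs_read.isra.3" is first truncated to "vfs_read"; "__return" is
 * appended for return probes; if the name is already taken and suffixes
 * are allowed, "vfs_read_1", "vfs_read_2", ... are tried up to
 * MAX_EVENT_INDEX.
 */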
static int get_new_event_name(char *buf, size_t len, const char *base,
struct strlist *namelist, bool ret_event,
bool allow_suffix)
{
int i, ret;
char *p, *nbase;
if (*base == '.')
base++;
nbase = strdup(base);
if (!nbase)
return -ENOMEM;
/* Cut off the dot suffixes (e.g. .constprop, .isra) and version suffixes */
p = strpbrk(nbase, ".@");
if (p && p != nbase)
*p = '\0';
/* Try no suffix number */
ret = e_snprintf(buf, len, "%s%s", nbase, ret_event ? "__return" : "");
if (ret < 0) {
pr_debug("snprintf() failed: %d\n", ret);
goto out;
}
if (!strlist__has_entry(namelist, buf))
goto out;
if (!allow_suffix) {
pr_warning("Error: event \"%s\" already exists.\n"
" Hint: Remove existing event by 'perf probe -d'\n"
" or force duplicates by 'perf probe -f'\n"
" or set 'force=yes' in BPF source.\n",
buf);
ret = -EEXIST;
goto out;
}
/* Try to add suffix */
for (i = 1; i < MAX_EVENT_INDEX; i++) {
ret = e_snprintf(buf, len, "%s_%d", nbase, i);
if (ret < 0) {
pr_debug("snprintf() failed: %d\n", ret);
goto out;
}
if (!strlist__has_entry(namelist, buf))
break;
}
if (i == MAX_EVENT_INDEX) {
pr_warning("Too many events are on the same function.\n");
ret = -ERANGE;
}
out:
free(nbase);
/* Final validation */
if (ret >= 0 && !is_c_func_name(buf)) {
pr_warning("Internal error: \"%s\" is an invalid event name.\n",
buf);
ret = -EINVAL;
}
return ret;
}
/* Warn if the current kernel's uprobe implementation is old */
static void warn_uprobe_event_compat(struct probe_trace_event *tev)
{
int i;
char *buf = synthesize_probe_trace_command(tev);
struct probe_trace_point *tp = &tev->point;
if (tp->ref_ctr_offset && !uprobe_ref_ctr_is_supported()) {
pr_warning("A semaphore is associated with %s:%s and "
"seems your kernel doesn't support it.\n",
tev->group, tev->event);
}
/* Old uprobe event doesn't support memory dereference */
if (!tev->uprobes || tev->nargs == 0 || !buf)
goto out;
for (i = 0; i < tev->nargs; i++) {
if (strchr(tev->args[i].value, '@')) {
pr_warning("%s accesses a variable by symbol name, but that is not supported for user application probe.\n",
tev->args[i].value);
break;
}
if (strglobmatch(tev->args[i].value, "[$+-]*")) {
pr_warning("Please upgrade your kernel to at least 3.14 to have access to feature %s\n",
tev->args[i].value);
break;
}
}
out:
free(buf);
}
/* Set new name from original perf_probe_event and namelist */
static int probe_trace_event__set_name(struct probe_trace_event *tev,
struct perf_probe_event *pev,
struct strlist *namelist,
bool allow_suffix)
{
const char *event, *group;
char buf[64];
int ret;
/* If probe_event or trace_event already have the name, reuse it */
if (pev->event && !pev->sdt)
event = pev->event;
else if (tev->event)
event = tev->event;
else {
/* Or generate new one from probe point */
if (pev->point.function &&
(strncmp(pev->point.function, "0x", 2) != 0) &&
!strisglob(pev->point.function))
event = pev->point.function;
else
event = tev->point.realname;
}
if (pev->group && !pev->sdt)
group = pev->group;
else if (tev->group)
group = tev->group;
else
group = PERFPROBE_GROUP;
/* Get an unused new event name */
ret = get_new_event_name(buf, 64, event, namelist,
tev->point.retprobe, allow_suffix);
if (ret < 0)
return ret;
event = buf;
tev->event = strdup(event);
tev->group = strdup(group);
if (tev->event == NULL || tev->group == NULL)
return -ENOMEM;
/*
 * Add the new event name to the namelist if multiprobe events are NOT
 * supported, since we have to use a new event name for the following
 * probes in that case.
*/
if (!multiprobe_event_is_supported())
strlist__add(namelist, event);
return 0;
}
static int __open_probe_file_and_namelist(bool uprobe,
struct strlist **namelist)
{
int fd;
fd = probe_file__open(PF_FL_RW | (uprobe ? PF_FL_UPROBE : 0));
if (fd < 0)
return fd;
/* Get current event names */
*namelist = probe_file__get_namelist(fd);
if (!(*namelist)) {
pr_debug("Failed to get current event list.\n");
close(fd);
return -ENOMEM;
}
return fd;
}
static int __add_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event *tevs,
int ntevs, bool allow_suffix)
{
int i, fd[2] = {-1, -1}, up, ret;
struct probe_trace_event *tev = NULL;
struct probe_cache *cache = NULL;
struct strlist *namelist[2] = {NULL, NULL};
struct nscookie nsc;
up = pev->uprobes ? 1 : 0;
fd[up] = __open_probe_file_and_namelist(up, &namelist[up]);
if (fd[up] < 0)
return fd[up];
ret = 0;
for (i = 0; i < ntevs; i++) {
tev = &tevs[i];
up = tev->uprobes ? 1 : 0;
if (fd[up] == -1) { /* Open the kprobe/uprobe_events */
fd[up] = __open_probe_file_and_namelist(up,
&namelist[up]);
if (fd[up] < 0)
goto close_out;
}
/* Skip if the symbol is out of .text or blacklisted */
if (!tev->point.symbol && !pev->uprobes)
continue;
/* Set new name for tev (and update namelist) */
ret = probe_trace_event__set_name(tev, pev, namelist[up],
allow_suffix);
if (ret < 0)
break;
nsinfo__mountns_enter(pev->nsi, &nsc);
ret = probe_file__add_event(fd[up], tev);
nsinfo__mountns_exit(&nsc);
if (ret < 0)
break;
/*
 * Probes after the first probe which come from the same
 * user input are always allowed to add a suffix, because
* there might be several addresses corresponding to
* one code line.
*/
allow_suffix = true;
}
if (ret == -EINVAL && pev->uprobes)
warn_uprobe_event_compat(tev);
if (ret == 0 && probe_conf.cache) {
cache = probe_cache__new(pev->target, pev->nsi);
if (!cache ||
probe_cache__add_entry(cache, pev, tevs, ntevs) < 0 ||
probe_cache__commit(cache) < 0)
pr_warning("Failed to add event to probe cache\n");
probe_cache__delete(cache);
}
close_out:
for (up = 0; up < 2; up++) {
strlist__delete(namelist[up]);
if (fd[up] >= 0)
close(fd[up]);
}
return ret;
}
static int find_probe_functions(struct map *map, char *name,
struct symbol **syms)
{
int found = 0;
struct symbol *sym;
struct rb_node *tmp;
const char *norm, *ver;
char *buf = NULL;
bool cut_version = true;
if (map__load(map) < 0)
return -EACCES; /* Possible permission error to load symbols */
/* If user gives a version, don't cut off the version from symbols */
if (strchr(name, '@'))
cut_version = false;
map__for_each_symbol(map, sym, tmp) {
norm = arch__normalize_symbol_name(sym->name);
if (!norm)
continue;
if (cut_version) {
/* We don't care whether this is the default versioned symbol or not */
ver = strchr(norm, '@');
if (ver) {
buf = strndup(norm, ver - norm);
if (!buf)
return -ENOMEM;
norm = buf;
}
}
if (strglobmatch(norm, name)) {
found++;
if (syms && found < probe_conf.max_probes)
syms[found - 1] = sym;
}
if (buf)
zfree(&buf);
}
return found;
}
void __weak arch__fix_tev_from_maps(struct perf_probe_event *pev __maybe_unused,
struct probe_trace_event *tev __maybe_unused,
struct map *map __maybe_unused,
struct symbol *sym __maybe_unused) { }
static void pr_kallsyms_access_error(void)
{
pr_err("Please ensure you can read the /proc/kallsyms symbol addresses.\n"
"If /proc/sys/kernel/kptr_restrict is '2', you can not read\n"
"kernel symbol addresses even if you are a superuser. Please change\n"
"it to '1'. If kptr_restrict is '1', the superuser can read the\n"
"symbol addresses.\n"
"In that case, please run this command again with sudo.\n");
}
/*
* Find probe function addresses from map.
* Return an error or the number of found probe_trace_event
*/
static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
struct probe_trace_event **tevs)
{
struct map *map = NULL;
struct ref_reloc_sym *reloc_sym = NULL;
struct symbol *sym;
struct symbol **syms = NULL;
struct probe_trace_event *tev;
struct perf_probe_point *pp = &pev->point;
struct probe_trace_point *tp;
int num_matched_functions;
int ret, i, j, skipped = 0;
char *mod_name;
map = get_target_map(pev->target, pev->nsi, pev->uprobes);
if (!map) {
ret = -EINVAL;
goto out;
}
syms = malloc(sizeof(struct symbol *) * probe_conf.max_probes);
if (!syms) {
ret = -ENOMEM;
goto out;
}
/*
 * Load matched symbols: Since different local symbols may have the
 * same name but different addresses, this lists all the symbols.
*/
num_matched_functions = find_probe_functions(map, pp->function, syms);
if (num_matched_functions <= 0) {
if (num_matched_functions == -EACCES) {
pr_err("Failed to load symbols from %s\n",
pev->target ?: "/proc/kallsyms");
if (pev->target)
pr_err("Please ensure the file is not stripped.\n");
else
pr_kallsyms_access_error();
} else
pr_err("Failed to find symbol %s in %s\n", pp->function,
pev->target ? : "kernel");
ret = -ENOENT;
goto out;
} else if (num_matched_functions > probe_conf.max_probes) {
pr_err("Too many functions matched in %s\n",
pev->target ? : "kernel");
ret = -E2BIG;
goto out;
}
/* Note that the symbols in the kmodule are not relocated */
if (!pev->uprobes && !pev->target &&
(!pp->retprobe || kretprobe_offset_is_supported())) {
reloc_sym = kernel_get_ref_reloc_sym(NULL);
if (!reloc_sym) {
pr_warning("Relocated base symbol is not found! "
"Check /proc/sys/kernel/kptr_restrict\n"
"and /proc/sys/kernel/perf_event_paranoid. "
"Or run as privileged perf user.\n\n");
ret = -EINVAL;
goto out;
}
}
/* Setup result trace-probe-events */
*tevs = zalloc(sizeof(*tev) * num_matched_functions);
if (!*tevs) {
ret = -ENOMEM;
goto out;
}
ret = 0;
for (j = 0; j < num_matched_functions; j++) {
sym = syms[j];
if (sym->type != STT_FUNC)
continue;
/* There can be duplicated symbols in the map */
for (i = 0; i < j; i++)
if (sym->start == syms[i]->start) {
pr_debug("Found duplicated symbol %s @ %" PRIx64 "\n",
sym->name, sym->start);
break;
}
if (i != j)
continue;
tev = (*tevs) + ret;
tp = &tev->point;
if (ret == num_matched_functions) {
pr_warning("Too many symbols are listed. Skip it.\n");
break;
}
ret++;
if (pp->offset > sym->end - sym->start) {
pr_warning("Offset %ld is bigger than the size of %s\n",
pp->offset, sym->name);
ret = -ENOENT;
goto err_out;
}
/* Add one probe point */
tp->address = map__unmap_ip(map, sym->start) + pp->offset;
/* Check the kprobe (not in module) is within .text */
if (!pev->uprobes && !pev->target &&
kprobe_warn_out_range(sym->name, tp->address)) {
tp->symbol = NULL; /* Skip it */
skipped++;
} else if (reloc_sym) {
tp->symbol = strdup_or_goto(reloc_sym->name, nomem_out);
tp->offset = tp->address - reloc_sym->addr;
} else {
tp->symbol = strdup_or_goto(sym->name, nomem_out);
tp->offset = pp->offset;
}
tp->realname = strdup_or_goto(sym->name, nomem_out);
tp->retprobe = pp->retprobe;
if (pev->target) {
if (pev->uprobes) {
tev->point.module = strdup_or_goto(pev->target,
nomem_out);
} else {
mod_name = find_module_name(pev->target);
tev->point.module =
strdup(mod_name ? mod_name : pev->target);
free(mod_name);
if (!tev->point.module)
goto nomem_out;
}
}
tev->uprobes = pev->uprobes;
tev->nargs = pev->nargs;
if (tev->nargs) {
tev->args = zalloc(sizeof(struct probe_trace_arg) *
tev->nargs);
if (tev->args == NULL)
goto nomem_out;
}
for (i = 0; i < tev->nargs; i++) {
if (pev->args[i].name)
tev->args[i].name =
strdup_or_goto(pev->args[i].name,
nomem_out);
tev->args[i].value = strdup_or_goto(pev->args[i].var,
nomem_out);
if (pev->args[i].type)
tev->args[i].type =
strdup_or_goto(pev->args[i].type,
nomem_out);
}
arch__fix_tev_from_maps(pev, tev, map, sym);
}
if (ret == skipped) {
ret = -ENOENT;
goto err_out;
}
out:
map__put(map);
free(syms);
return ret;
nomem_out:
ret = -ENOMEM;
err_out:
clear_probe_trace_events(*tevs, num_matched_functions);
zfree(tevs);
goto out;
}
static int try_to_find_absolute_address(struct perf_probe_event *pev,
struct probe_trace_event **tevs)
{
struct perf_probe_point *pp = &pev->point;
struct probe_trace_event *tev;
struct probe_trace_point *tp;
int i, err;
if (!(pev->point.function && !strncmp(pev->point.function, "0x", 2)))
return -EINVAL;
if (perf_probe_event_need_dwarf(pev))
return -EINVAL;
	/*
	 * This is 'perf probe /lib/libc.so 0xabcd'. Try to probe at an
	 * absolute address.
	 *
	 * Only one tev can be generated this way.
	 */
*tevs = zalloc(sizeof(*tev));
if (!*tevs)
return -ENOMEM;
tev = *tevs;
tp = &tev->point;
/*
* Don't use tp->offset, use address directly, because
* in synthesize_probe_trace_command() address cannot be
* zero.
*/
tp->address = pev->point.abs_address;
tp->retprobe = pp->retprobe;
tev->uprobes = pev->uprobes;
err = -ENOMEM;
/*
* Give it a '0x' leading symbol name.
* In __add_probe_trace_events, a NULL symbol is interpreted as
* invalid.
*/
if (asprintf(&tp->symbol, "0x%" PRIx64, tp->address) < 0)
goto errout;
/* For kprobe, check range */
if ((!tev->uprobes) &&
(kprobe_warn_out_range(tev->point.symbol,
tev->point.address))) {
err = -EACCES;
goto errout;
}
if (asprintf(&tp->realname, "abs_%" PRIx64, tp->address) < 0)
goto errout;
if (pev->target) {
tp->module = strdup(pev->target);
if (!tp->module)
goto errout;
}
if (tev->group) {
tev->group = strdup(pev->group);
if (!tev->group)
goto errout;
}
if (pev->event) {
tev->event = strdup(pev->event);
if (!tev->event)
goto errout;
}
tev->nargs = pev->nargs;
tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
if (!tev->args)
goto errout;
for (i = 0; i < tev->nargs; i++)
copy_to_probe_trace_arg(&tev->args[i], &pev->args[i]);
return 1;
errout:
clear_probe_trace_events(*tevs, 1);
*tevs = NULL;
return err;
}
/* Concatenate two arrays */
static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b)
{
void *ret;
ret = malloc(sz_a + sz_b);
if (ret) {
memcpy(ret, a, sz_a);
memcpy(ret + sz_a, b, sz_b);
}
return ret;
}
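/*
 * Append the events in *tevs2 to *tevs. On return *tevs2 is always NULL: on
 * success its events now belong to *tevs, on failure they have been cleared.
 * Fails with -E2BIG if the combined count would exceed probe_conf.max_probes.
 */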
static int
concat_probe_trace_events(struct probe_trace_event **tevs, int *ntevs,
struct probe_trace_event **tevs2, int ntevs2)
{
struct probe_trace_event *new_tevs;
int ret = 0;
if (*ntevs == 0) {
*tevs = *tevs2;
*ntevs = ntevs2;
*tevs2 = NULL;
return 0;
}
if (*ntevs + ntevs2 > probe_conf.max_probes)
ret = -E2BIG;
else {
/* Concatenate the array of probe_trace_event */
new_tevs = memcat(*tevs, (*ntevs) * sizeof(**tevs),
*tevs2, ntevs2 * sizeof(**tevs2));
if (!new_tevs)
ret = -ENOMEM;
else {
free(*tevs);
*tevs = new_tevs;
*ntevs += ntevs2;
}
}
if (ret < 0)
clear_probe_trace_events(*tevs2, ntevs2);
zfree(tevs2);
return ret;
}
/*
* Try to find probe_trace_event from given probe caches. Return the number
* of cached events found, if an error occurs return the error.
*/
static int find_cached_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs,
const char *target)
{
struct probe_cache *cache;
struct probe_cache_entry *entry;
struct probe_trace_event *tmp_tevs = NULL;
int ntevs = 0;
int ret = 0;
cache = probe_cache__new(target, pev->nsi);
/* Return 0 ("not found") if the target has no probe cache. */
if (!cache)
return 0;
for_each_probe_cache_entry(entry, cache) {
/* Skip the cache entry which has no name */
if (!entry->pev.event || !entry->pev.group)
continue;
if ((!pev->group || strglobmatch(entry->pev.group, pev->group)) &&
strglobmatch(entry->pev.event, pev->event)) {
ret = probe_cache_entry__get_event(entry, &tmp_tevs);
if (ret > 0)
ret = concat_probe_trace_events(tevs, &ntevs,
&tmp_tevs, ret);
if (ret < 0)
break;
}
}
probe_cache__delete(cache);
if (ret < 0) {
clear_probe_trace_events(*tevs, ntevs);
zfree(tevs);
} else {
ret = ntevs;
if (ntevs > 0 && target && target[0] == '/')
pev->uprobes = true;
}
return ret;
}
/* Try to find probe_trace_event from all probe caches */
static int find_cached_events_all(struct perf_probe_event *pev,
struct probe_trace_event **tevs)
{
struct probe_trace_event *tmp_tevs = NULL;
struct strlist *bidlist;
struct str_node *nd;
char *pathname;
int ntevs = 0;
int ret;
/* Get the buildid list of all valid caches */
bidlist = build_id_cache__list_all(true);
if (!bidlist) {
ret = -errno;
pr_debug("Failed to get buildids: %d\n", ret);
return ret;
}
ret = 0;
strlist__for_each_entry(nd, bidlist) {
pathname = build_id_cache__origname(nd->s);
ret = find_cached_events(pev, &tmp_tevs, pathname);
		/* If no events were found in this cache (ret == 0), just skip it */
if (ret > 0)
ret = concat_probe_trace_events(tevs, &ntevs,
&tmp_tevs, ret);
free(pathname);
if (ret < 0)
break;
}
strlist__delete(bidlist);
if (ret < 0) {
clear_probe_trace_events(*tevs, ntevs);
zfree(tevs);
} else
ret = ntevs;
return ret;
}
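/*
 * Look up @pev in the probe cache and parse the cached definitions into
 * probe_trace_events. SDT events are searched by name, across all caches if
 * no target was given. Returns the number of events found, 0 if none, or a
 * negative error code.
 */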
static int find_probe_trace_events_from_cache(struct perf_probe_event *pev,
struct probe_trace_event **tevs)
{
struct probe_cache *cache;
struct probe_cache_entry *entry;
struct probe_trace_event *tev;
struct str_node *node;
int ret, i;
if (pev->sdt) {
/* For SDT/cached events, we use special search functions */
if (!pev->target)
return find_cached_events_all(pev, tevs);
else
return find_cached_events(pev, tevs, pev->target);
}
cache = probe_cache__new(pev->target, pev->nsi);
if (!cache)
return 0;
entry = probe_cache__find(cache, pev);
if (!entry) {
/* SDT must be in the cache */
ret = pev->sdt ? -ENOENT : 0;
goto out;
}
ret = strlist__nr_entries(entry->tevlist);
if (ret > probe_conf.max_probes) {
pr_debug("Too many entries matched in the cache of %s\n",
pev->target ? : "kernel");
ret = -E2BIG;
goto out;
}
*tevs = zalloc(ret * sizeof(*tev));
if (!*tevs) {
ret = -ENOMEM;
goto out;
}
i = 0;
strlist__for_each_entry(node, entry->tevlist) {
tev = &(*tevs)[i++];
ret = parse_probe_trace_command(node->s, tev);
if (ret < 0)
goto out;
		/* Set the uprobes attribute to match the original event */
tev->uprobes = pev->uprobes;
}
ret = i;
out:
probe_cache__delete(cache);
return ret;
}
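/*
 * Convert one perf_probe_event into trace events, trying in order: an
 * absolute address probe, the probe cache, debuginfo, and finally the raw
 * symbol map.
 */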
static int convert_to_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs)
{
int ret;
if (!pev->group && !pev->sdt) {
/* Set group name if not given */
if (!pev->uprobes) {
pev->group = strdup(PERFPROBE_GROUP);
ret = pev->group ? 0 : -ENOMEM;
} else
ret = convert_exec_to_group(pev->target, &pev->group);
if (ret != 0) {
pr_warning("Failed to make a group name.\n");
return ret;
}
}
ret = try_to_find_absolute_address(pev, tevs);
if (ret > 0)
return ret;
/* At first, we need to lookup cache entry */
ret = find_probe_trace_events_from_cache(pev, tevs);
if (ret > 0 || pev->sdt) /* SDT can be found only in the cache */
return ret == 0 ? -ENOENT : ret; /* Found in probe cache */
/* Convert perf_probe_event with debuginfo */
ret = try_to_find_probe_trace_events(pev, tevs);
if (ret != 0)
return ret; /* Found in debuginfo or got an error */
return find_probe_trace_events_from_map(pev, tevs);
}
int convert_perf_probe_events(struct perf_probe_event *pevs, int npevs)
{
int i, ret;
/* Loop 1: convert all events */
for (i = 0; i < npevs; i++) {
/* Init kprobe blacklist if needed */
if (!pevs[i].uprobes)
kprobe_blacklist__init();
/* Convert with or without debuginfo */
ret = convert_to_probe_trace_events(&pevs[i], &pevs[i].tevs);
if (ret < 0)
return ret;
pevs[i].ntevs = ret;
}
	/* This releases the blacklist only if it was allocated */
kprobe_blacklist__release();
return 0;
}
static int show_probe_trace_event(struct probe_trace_event *tev)
{
char *buf = synthesize_probe_trace_command(tev);
if (!buf) {
pr_debug("Failed to synthesize probe trace event.\n");
return -EINVAL;
}
	/* Definitions are always shown on stdout */
printf("%s\n", buf);
free(buf);
return 0;
}
int show_probe_trace_events(struct perf_probe_event *pevs, int npevs)
{
struct strlist *namelist = strlist__new(NULL, NULL);
struct probe_trace_event *tev;
struct perf_probe_event *pev;
int i, j, ret = 0;
if (!namelist)
return -ENOMEM;
for (j = 0; j < npevs && !ret; j++) {
pev = &pevs[j];
for (i = 0; i < pev->ntevs && !ret; i++) {
tev = &pev->tevs[i];
/* Skip if the symbol is out of .text or blacklisted */
if (!tev->point.symbol && !pev->uprobes)
continue;
/* Set new name for tev (and update namelist) */
ret = probe_trace_event__set_name(tev, pev,
namelist, true);
if (!ret)
ret = show_probe_trace_event(tev);
}
}
strlist__delete(namelist);
return ret;
}
static int show_bootconfig_event(struct probe_trace_event *tev)
{
struct probe_trace_point *tp = &tev->point;
struct strbuf buf;
char *ret = NULL;
int err;
if (strbuf_init(&buf, 32) < 0)
return -ENOMEM;
err = synthesize_kprobe_trace_def(tp, &buf);
if (err >= 0)
err = synthesize_probe_trace_args(tev, &buf);
if (err >= 0)
ret = strbuf_detach(&buf, NULL);
strbuf_release(&buf);
if (ret) {
printf("'%s'", ret);
free(ret);
}
return err;
}
int show_bootconfig_events(struct perf_probe_event *pevs, int npevs)
{
struct strlist *namelist = strlist__new(NULL, NULL);
struct probe_trace_event *tev;
struct perf_probe_event *pev;
char *cur_name = NULL;
int i, j, ret = 0;
if (!namelist)
return -ENOMEM;
for (j = 0; j < npevs && !ret; j++) {
pev = &pevs[j];
if (pev->group && strcmp(pev->group, "probe"))
pr_warning("WARN: Group name %s is ignored\n", pev->group);
if (pev->uprobes) {
pr_warning("ERROR: Bootconfig doesn't support uprobes\n");
ret = -EINVAL;
break;
}
for (i = 0; i < pev->ntevs && !ret; i++) {
tev = &pev->tevs[i];
/* Skip if the symbol is out of .text or blacklisted */
if (!tev->point.symbol && !pev->uprobes)
continue;
/* Set new name for tev (and update namelist) */
ret = probe_trace_event__set_name(tev, pev,
namelist, true);
if (ret)
break;
if (!cur_name || strcmp(cur_name, tev->event)) {
printf("%sftrace.event.kprobes.%s.probe = ",
cur_name ? "\n" : "", tev->event);
cur_name = tev->event;
} else
printf(", ");
ret = show_bootconfig_event(tev);
}
}
printf("\n");
strlist__delete(namelist);
return ret;
}
int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs)
{
int i, ret = 0;
/* Loop 2: add all events */
for (i = 0; i < npevs; i++) {
ret = __add_probe_trace_events(&pevs[i], pevs[i].tevs,
pevs[i].ntevs,
probe_conf.force_add);
if (ret < 0)
break;
}
return ret;
}
void cleanup_perf_probe_events(struct perf_probe_event *pevs, int npevs)
{
int i, j;
struct perf_probe_event *pev;
/* Loop 3: cleanup and free trace events */
for (i = 0; i < npevs; i++) {
pev = &pevs[i];
for (j = 0; j < pevs[i].ntevs; j++)
clear_probe_trace_event(&pevs[i].tevs[j]);
zfree(&pevs[i].tevs);
pevs[i].ntevs = 0;
nsinfo__zput(pev->nsi);
clear_perf_probe_event(&pevs[i]);
}
}
int add_perf_probe_events(struct perf_probe_event *pevs, int npevs)
{
int ret;
ret = init_probe_symbol_maps(pevs->uprobes);
if (ret < 0)
return ret;
ret = convert_perf_probe_events(pevs, npevs);
if (ret == 0)
ret = apply_perf_probe_events(pevs, npevs);
cleanup_perf_probe_events(pevs, npevs);
exit_probe_symbol_maps();
return ret;
}
int del_perf_probe_events(struct strfilter *filter)
{
int ret, ret2, ufd = -1, kfd = -1;
char *str = strfilter__string(filter);
if (!str)
return -EINVAL;
/* Get current event names */
ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);
if (ret < 0)
goto out;
ret = probe_file__del_events(kfd, filter);
if (ret < 0 && ret != -ENOENT)
goto error;
ret2 = probe_file__del_events(ufd, filter);
if (ret2 < 0 && ret2 != -ENOENT) {
ret = ret2;
goto error;
}
ret = 0;
error:
if (kfd >= 0)
close(kfd);
if (ufd >= 0)
close(ufd);
out:
free(str);
return ret;
}
int show_available_funcs(const char *target, struct nsinfo *nsi,
struct strfilter *_filter, bool user)
{
struct map *map;
struct dso *dso;
int ret;
ret = init_probe_symbol_maps(user);
if (ret < 0)
return ret;
/* Get a symbol map */
map = get_target_map(target, nsi, user);
if (!map) {
pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
return -EINVAL;
}
ret = map__load(map);
if (ret) {
if (ret == -2) {
char *str = strfilter__string(_filter);
pr_err("Failed to find symbols matched to \"%s\"\n",
str);
free(str);
} else
pr_err("Failed to load symbols in %s\n",
(target) ? : "kernel");
goto end;
}
dso = map__dso(map);
dso__sort_by_name(dso);
/* Show all (filtered) symbols */
setup_pager();
for (size_t i = 0; i < dso->symbol_names_len; i++) {
struct symbol *pos = dso->symbol_names[i];
if (strfilter__compare(_filter, pos->name))
printf("%s\n", pos->name);
}
end:
map__put(map);
exit_probe_symbol_maps();
return ret;
}
int copy_to_probe_trace_arg(struct probe_trace_arg *tvar,
struct perf_probe_arg *pvar)
{
tvar->value = strdup(pvar->var);
if (tvar->value == NULL)
return -ENOMEM;
if (pvar->type) {
tvar->type = strdup(pvar->type);
if (tvar->type == NULL)
return -ENOMEM;
}
if (pvar->name) {
tvar->name = strdup(pvar->name);
if (tvar->name == NULL)
return -ENOMEM;
} else
tvar->name = NULL;
return 0;
}
| linux-master | tools/perf/util/probe-event.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <string.h>
#include "perf_regs.h"
#include "util/sample.h"
#include "debug.h"
int __weak arch_sdt_arg_parse_op(char *old_op __maybe_unused,
char **new_op __maybe_unused)
{
return SDT_ARG_SKIP;
}
uint64_t __weak arch__intr_reg_mask(void)
{
return 0;
}
uint64_t __weak arch__user_reg_mask(void)
{
return 0;
}
#ifdef HAVE_PERF_REGS_SUPPORT
const char *perf_reg_name(int id, const char *arch)
{
const char *reg_name = NULL;
if (!strcmp(arch, "csky"))
reg_name = __perf_reg_name_csky(id);
else if (!strcmp(arch, "loongarch"))
reg_name = __perf_reg_name_loongarch(id);
else if (!strcmp(arch, "mips"))
reg_name = __perf_reg_name_mips(id);
else if (!strcmp(arch, "powerpc"))
reg_name = __perf_reg_name_powerpc(id);
else if (!strcmp(arch, "riscv"))
reg_name = __perf_reg_name_riscv(id);
else if (!strcmp(arch, "s390"))
reg_name = __perf_reg_name_s390(id);
else if (!strcmp(arch, "x86"))
reg_name = __perf_reg_name_x86(id);
else if (!strcmp(arch, "arm"))
reg_name = __perf_reg_name_arm(id);
else if (!strcmp(arch, "arm64"))
reg_name = __perf_reg_name_arm64(id);
return reg_name ?: "unknown";
}
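/*
 * Sampled registers are packed: regs->regs[] holds only the registers
 * selected in regs->mask, in ascending bit order. Recover the value of
 * register @id by counting the mask bits below it, caching the result for
 * later lookups within the same sample.
 */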
int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
{
int i, idx = 0;
u64 mask = regs->mask;
if ((u64)id >= PERF_SAMPLE_REGS_CACHE_SIZE)
return -EINVAL;
if (regs->cache_mask & (1ULL << id))
goto out;
if (!(mask & (1ULL << id)))
return -EINVAL;
for (i = 0; i < id; i++) {
if (mask & (1ULL << i))
idx++;
}
regs->cache_mask |= (1ULL << id);
regs->cache_regs[id] = regs->regs[idx];
out:
*valp = regs->cache_regs[id];
return 0;
}
uint64_t perf_arch_reg_ip(const char *arch)
{
if (!strcmp(arch, "arm"))
return __perf_reg_ip_arm();
else if (!strcmp(arch, "arm64"))
return __perf_reg_ip_arm64();
else if (!strcmp(arch, "csky"))
return __perf_reg_ip_csky();
else if (!strcmp(arch, "loongarch"))
return __perf_reg_ip_loongarch();
else if (!strcmp(arch, "mips"))
return __perf_reg_ip_mips();
else if (!strcmp(arch, "powerpc"))
return __perf_reg_ip_powerpc();
else if (!strcmp(arch, "riscv"))
return __perf_reg_ip_riscv();
else if (!strcmp(arch, "s390"))
return __perf_reg_ip_s390();
else if (!strcmp(arch, "x86"))
return __perf_reg_ip_x86();
pr_err("Fail to find IP register for arch %s, returns 0\n", arch);
return 0;
}
uint64_t perf_arch_reg_sp(const char *arch)
{
if (!strcmp(arch, "arm"))
return __perf_reg_sp_arm();
else if (!strcmp(arch, "arm64"))
return __perf_reg_sp_arm64();
else if (!strcmp(arch, "csky"))
return __perf_reg_sp_csky();
else if (!strcmp(arch, "loongarch"))
return __perf_reg_sp_loongarch();
else if (!strcmp(arch, "mips"))
return __perf_reg_sp_mips();
else if (!strcmp(arch, "powerpc"))
return __perf_reg_sp_powerpc();
else if (!strcmp(arch, "riscv"))
return __perf_reg_sp_riscv();
else if (!strcmp(arch, "s390"))
return __perf_reg_sp_s390();
else if (!strcmp(arch, "x86"))
return __perf_reg_sp_x86();
pr_err("Fail to find SP register for arch %s, returns 0\n", arch);
return 0;
}
#endif
| linux-master | tools/perf/util/perf_regs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* thread-stack.c: Synthesize a thread's stack using call / return events
* Copyright (c) 2014, Intel Corporation.
*/
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "env.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "call-path.h"
#include "thread-stack.h"
#define STACK_GROWTH 2048
/*
* State of retpoline detection.
*
* RETPOLINE_NONE: no retpoline detection
* X86_RETPOLINE_POSSIBLE: x86 retpoline possible
* X86_RETPOLINE_DETECTED: x86 retpoline detected
*/
enum retpoline_state_t {
RETPOLINE_NONE,
X86_RETPOLINE_POSSIBLE,
X86_RETPOLINE_DETECTED,
};
/**
* struct thread_stack_entry - thread stack entry.
* @ret_addr: return address
* @timestamp: timestamp (if known)
* @ref: external reference (e.g. db_id of sample)
* @branch_count: the branch count when the entry was created
* @insn_count: the instruction count when the entry was created
 * @cyc_count: the cycle count when the entry was created
* @db_id: id used for db-export
* @cp: call path
* @no_call: a 'call' was not seen
* @trace_end: a 'call' but trace ended
* @non_call: a branch but not a 'call' to the start of a different symbol
*/
struct thread_stack_entry {
u64 ret_addr;
u64 timestamp;
u64 ref;
u64 branch_count;
u64 insn_count;
u64 cyc_count;
u64 db_id;
struct call_path *cp;
bool no_call;
bool trace_end;
bool non_call;
};
/**
* struct thread_stack - thread stack constructed from 'call' and 'return'
* branch samples.
* @stack: array that holds the stack
* @cnt: number of entries in the stack
* @sz: current maximum stack size
* @trace_nr: current trace number
* @branch_count: running branch count
* @insn_count: running instruction count
 * @cyc_count: running cycle count
* @kernel_start: kernel start address
* @last_time: last timestamp
* @crp: call/return processor
* @comm: current comm
* @arr_sz: size of array if this is the first element of an array
* @rstate: used to detect retpolines
* @br_stack_rb: branch stack (ring buffer)
* @br_stack_sz: maximum branch stack size
* @br_stack_pos: current position in @br_stack_rb
* @mispred_all: mark all branches as mispredicted
*/
struct thread_stack {
struct thread_stack_entry *stack;
size_t cnt;
size_t sz;
u64 trace_nr;
u64 branch_count;
u64 insn_count;
u64 cyc_count;
u64 kernel_start;
u64 last_time;
struct call_return_processor *crp;
struct comm *comm;
unsigned int arr_sz;
enum retpoline_state_t rstate;
struct branch_stack *br_stack_rb;
unsigned int br_stack_sz;
unsigned int br_stack_pos;
bool mispred_all;
};
/*
* Assume pid == tid == 0 identifies the idle task as defined by
* perf_session__register_idle_thread(). The idle task is really 1 task per cpu,
* and therefore requires a stack for each cpu.
*/
static inline bool thread_stack__per_cpu(struct thread *thread)
{
return !(thread__tid(thread) || thread__pid(thread));
}
static int thread_stack__grow(struct thread_stack *ts)
{
struct thread_stack_entry *new_stack;
size_t sz, new_sz;
new_sz = ts->sz + STACK_GROWTH;
sz = new_sz * sizeof(struct thread_stack_entry);
new_stack = realloc(ts->stack, sz);
if (!new_stack)
return -ENOMEM;
ts->stack = new_stack;
ts->sz = new_sz;
return 0;
}
static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
struct call_return_processor *crp,
bool callstack, unsigned int br_stack_sz)
{
int err;
if (callstack) {
err = thread_stack__grow(ts);
if (err)
return err;
}
if (br_stack_sz) {
size_t sz = sizeof(struct branch_stack);
sz += br_stack_sz * sizeof(struct branch_entry);
ts->br_stack_rb = zalloc(sz);
if (!ts->br_stack_rb)
return -ENOMEM;
ts->br_stack_sz = br_stack_sz;
}
if (thread__maps(thread) && maps__machine(thread__maps(thread))) {
struct machine *machine = maps__machine(thread__maps(thread));
const char *arch = perf_env__arch(machine->env);
ts->kernel_start = machine__kernel_start(machine);
if (!strcmp(arch, "x86"))
ts->rstate = X86_RETPOLINE_POSSIBLE;
} else {
ts->kernel_start = 1ULL << 63;
}
ts->crp = crp;
return 0;
}
static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
struct call_return_processor *crp,
bool callstack,
unsigned int br_stack_sz)
{
struct thread_stack *ts = thread__ts(thread), *new_ts;
unsigned int old_sz = ts ? ts->arr_sz : 0;
unsigned int new_sz = 1;
if (thread_stack__per_cpu(thread) && cpu > 0)
new_sz = roundup_pow_of_two(cpu + 1);
if (!ts || new_sz > old_sz) {
new_ts = calloc(new_sz, sizeof(*ts));
if (!new_ts)
return NULL;
if (ts)
memcpy(new_ts, ts, old_sz * sizeof(*ts));
new_ts->arr_sz = new_sz;
free(thread__ts(thread));
thread__set_ts(thread, new_ts);
ts = new_ts;
}
if (thread_stack__per_cpu(thread) && cpu > 0 &&
(unsigned int)cpu < ts->arr_sz)
ts += cpu;
if (!ts->stack &&
thread_stack__init(ts, thread, crp, callstack, br_stack_sz))
return NULL;
return ts;
}
static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
{
struct thread_stack *ts = thread__ts(thread);
if (cpu < 0)
cpu = 0;
if (!ts || (unsigned int)cpu >= ts->arr_sz)
return NULL;
ts += cpu;
if (!ts->stack)
return NULL;
return ts;
}
static inline struct thread_stack *thread__stack(struct thread *thread,
int cpu)
{
if (!thread)
return NULL;
if (thread_stack__per_cpu(thread))
return thread__cpu_stack(thread, cpu);
return thread__ts(thread);
}
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
bool trace_end)
{
int err = 0;
if (ts->cnt == ts->sz) {
err = thread_stack__grow(ts);
if (err) {
pr_warning("Out of memory: discarding thread stack\n");
ts->cnt = 0;
}
}
ts->stack[ts->cnt].trace_end = trace_end;
ts->stack[ts->cnt++].ret_addr = ret_addr;
return err;
}
static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
size_t i;
/*
* In some cases there may be functions which are not seen to return.
* For example when setjmp / longjmp has been used. Or the perf context
* switch in the kernel which doesn't stop and start tracing in exactly
* the same code path. When that happens the return address will be
* further down the stack. If the return address is not found at all,
* we assume the opposite (i.e. this is a return for a call that wasn't
* seen for some reason) and leave the stack alone.
*/
for (i = ts->cnt; i; ) {
if (ts->stack[--i].ret_addr == ret_addr) {
ts->cnt = i;
return;
}
}
}
static void thread_stack__pop_trace_end(struct thread_stack *ts)
{
size_t i;
for (i = ts->cnt; i; ) {
if (ts->stack[--i].trace_end)
ts->cnt = i;
else
return;
}
}
static bool thread_stack__in_kernel(struct thread_stack *ts)
{
if (!ts->cnt)
return false;
return ts->stack[ts->cnt - 1].cp->in_kernel;
}
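/*
 * Report one call/return pair to the call/return processor: fill a
 * struct call_return for stack entry @idx, using the counts accumulated since
 * the entry was pushed, and hand it to crp->process().
 */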
static int thread_stack__call_return(struct thread *thread,
struct thread_stack *ts, size_t idx,
u64 timestamp, u64 ref, bool no_return)
{
struct call_return_processor *crp = ts->crp;
struct thread_stack_entry *tse;
struct call_return cr = {
.thread = thread,
.comm = ts->comm,
.db_id = 0,
};
u64 *parent_db_id;
tse = &ts->stack[idx];
cr.cp = tse->cp;
cr.call_time = tse->timestamp;
cr.return_time = timestamp;
cr.branch_count = ts->branch_count - tse->branch_count;
cr.insn_count = ts->insn_count - tse->insn_count;
cr.cyc_count = ts->cyc_count - tse->cyc_count;
cr.db_id = tse->db_id;
cr.call_ref = tse->ref;
cr.return_ref = ref;
if (tse->no_call)
cr.flags |= CALL_RETURN_NO_CALL;
if (no_return)
cr.flags |= CALL_RETURN_NO_RETURN;
if (tse->non_call)
cr.flags |= CALL_RETURN_NON_CALL;
	/*
	 * The parent db_id must be assigned before exporting the child. Note
	 * that the parent cannot be exported first because its information is
	 * not yet complete: its 'return' has not yet been processed.
	 */
parent_db_id = idx ? &(tse - 1)->db_id : NULL;
return crp->process(&cr, parent_db_id, crp->data);
}
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
struct call_return_processor *crp = ts->crp;
int err;
if (!crp) {
ts->cnt = 0;
ts->br_stack_pos = 0;
if (ts->br_stack_rb)
ts->br_stack_rb->nr = 0;
return 0;
}
while (ts->cnt) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
ts->last_time, 0, true);
if (err) {
pr_err("Error flushing thread stack!\n");
ts->cnt = 0;
return err;
}
}
return 0;
}
int thread_stack__flush(struct thread *thread)
{
struct thread_stack *ts = thread__ts(thread);
unsigned int pos;
int err = 0;
if (ts) {
for (pos = 0; pos < ts->arr_sz; pos++) {
int ret = __thread_stack__flush(thread, ts + pos);
if (ret)
err = ret;
}
}
return err;
}
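/*
 * The branch stack is a ring buffer filled backwards: br_stack_pos always
 * points at the most recent entry, so samples can be produced newest-first by
 * copying from br_stack_pos to the end and then wrapping around to the start.
 */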
static void thread_stack__update_br_stack(struct thread_stack *ts, u32 flags,
u64 from_ip, u64 to_ip)
{
struct branch_stack *bs = ts->br_stack_rb;
struct branch_entry *be;
if (!ts->br_stack_pos)
ts->br_stack_pos = ts->br_stack_sz;
ts->br_stack_pos -= 1;
be = &bs->entries[ts->br_stack_pos];
be->from = from_ip;
be->to = to_ip;
be->flags.value = 0;
be->flags.abort = !!(flags & PERF_IP_FLAG_TX_ABORT);
be->flags.in_tx = !!(flags & PERF_IP_FLAG_IN_TX);
/* No support for mispredict */
be->flags.mispred = ts->mispred_all;
if (bs->nr < ts->br_stack_sz)
bs->nr += 1;
}
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
u64 to_ip, u16 insn_len, u64 trace_nr, bool callstack,
unsigned int br_stack_sz, bool mispred_all)
{
struct thread_stack *ts = thread__stack(thread, cpu);
if (!thread)
return -EINVAL;
if (!ts) {
ts = thread_stack__new(thread, cpu, NULL, callstack, br_stack_sz);
if (!ts) {
pr_warning("Out of memory: no thread stack\n");
return -ENOMEM;
}
ts->trace_nr = trace_nr;
ts->mispred_all = mispred_all;
}
/*
* When the trace is discontinuous, the trace_nr changes. In that case
* the stack might be completely invalid. Better to report nothing than
* to report something misleading, so flush the stack.
*/
if (trace_nr != ts->trace_nr) {
if (ts->trace_nr)
__thread_stack__flush(thread, ts);
ts->trace_nr = trace_nr;
}
if (br_stack_sz)
thread_stack__update_br_stack(ts, flags, from_ip, to_ip);
/*
* Stop here if thread_stack__process() is in use, or not recording call
* stack.
*/
if (ts->crp || !callstack)
return 0;
if (flags & PERF_IP_FLAG_CALL) {
u64 ret_addr;
if (!to_ip)
return 0;
ret_addr = from_ip + insn_len;
if (ret_addr == to_ip)
return 0; /* Zero-length calls are excluded */
return thread_stack__push(ts, ret_addr,
flags & PERF_IP_FLAG_TRACE_END);
} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
/*
* If the caller did not change the trace number (which would
* have flushed the stack) then try to make sense of the stack.
* Possibly, tracing began after returning to the current
* address, so try to pop that. Also, do not expect a call made
* when the trace ended, to return, so pop that.
*/
thread_stack__pop(ts, to_ip);
thread_stack__pop_trace_end(ts);
} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
thread_stack__pop(ts, to_ip);
}
return 0;
}
void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
{
struct thread_stack *ts = thread__stack(thread, cpu);
if (!ts)
return;
if (trace_nr != ts->trace_nr) {
if (ts->trace_nr)
__thread_stack__flush(thread, ts);
ts->trace_nr = trace_nr;
}
}
static void __thread_stack__free(struct thread *thread, struct thread_stack *ts)
{
__thread_stack__flush(thread, ts);
zfree(&ts->stack);
zfree(&ts->br_stack_rb);
}
static void thread_stack__reset(struct thread *thread, struct thread_stack *ts)
{
unsigned int arr_sz = ts->arr_sz;
__thread_stack__free(thread, ts);
memset(ts, 0, sizeof(*ts));
ts->arr_sz = arr_sz;
}
void thread_stack__free(struct thread *thread)
{
struct thread_stack *ts = thread__ts(thread);
unsigned int pos;
if (ts) {
for (pos = 0; pos < ts->arr_sz; pos++)
__thread_stack__free(thread, ts + pos);
free(thread__ts(thread));
thread__set_ts(thread, NULL);
}
}
static inline u64 callchain_context(u64 ip, u64 kernel_start)
{
return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}
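/*
 * Synthesize a call chain from the thread stack: the sample ip first, then
 * the saved return addresses from innermost to outermost, with
 * PERF_CONTEXT_{USER,KERNEL} markers inserted whenever the context changes.
 */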
void thread_stack__sample(struct thread *thread, int cpu,
struct ip_callchain *chain,
size_t sz, u64 ip, u64 kernel_start)
{
struct thread_stack *ts = thread__stack(thread, cpu);
u64 context = callchain_context(ip, kernel_start);
u64 last_context;
size_t i, j;
if (sz < 2) {
chain->nr = 0;
return;
}
chain->ips[0] = context;
chain->ips[1] = ip;
if (!ts) {
chain->nr = 2;
return;
}
last_context = context;
for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
ip = ts->stack[ts->cnt - j].ret_addr;
context = callchain_context(ip, kernel_start);
if (context != last_context) {
if (i >= sz - 1)
break;
chain->ips[i++] = context;
last_context = context;
}
chain->ips[i] = ip;
}
chain->nr = i;
}
/*
* Hardware sample records, created some time after the event occurred, need to
* have subsequent addresses removed from the call chain.
*/
void thread_stack__sample_late(struct thread *thread, int cpu,
struct ip_callchain *chain, size_t sz,
u64 sample_ip, u64 kernel_start)
{
struct thread_stack *ts = thread__stack(thread, cpu);
u64 sample_context = callchain_context(sample_ip, kernel_start);
u64 last_context, context, ip;
size_t nr = 0, j;
if (sz < 2) {
chain->nr = 0;
return;
}
if (!ts)
goto out;
/*
* When tracing kernel space, kernel addresses occur at the top of the
* call chain after the event occurred but before tracing stopped.
* Skip them.
*/
for (j = 1; j <= ts->cnt; j++) {
ip = ts->stack[ts->cnt - j].ret_addr;
context = callchain_context(ip, kernel_start);
if (context == PERF_CONTEXT_USER ||
(context == sample_context && ip == sample_ip))
break;
}
last_context = sample_ip; /* Use sample_ip as an invalid context */
for (; nr < sz && j <= ts->cnt; nr++, j++) {
ip = ts->stack[ts->cnt - j].ret_addr;
context = callchain_context(ip, kernel_start);
if (context != last_context) {
if (nr >= sz - 1)
break;
chain->ips[nr++] = context;
last_context = context;
}
chain->ips[nr] = ip;
}
out:
if (nr) {
chain->nr = nr;
} else {
chain->ips[0] = sample_context;
chain->ips[1] = sample_ip;
chain->nr = 2;
}
}
void thread_stack__br_sample(struct thread *thread, int cpu,
struct branch_stack *dst, unsigned int sz)
{
struct thread_stack *ts = thread__stack(thread, cpu);
const size_t bsz = sizeof(struct branch_entry);
struct branch_stack *src;
struct branch_entry *be;
unsigned int nr;
dst->nr = 0;
if (!ts)
return;
src = ts->br_stack_rb;
if (!src->nr)
return;
dst->nr = min((unsigned int)src->nr, sz);
be = &dst->entries[0];
nr = min(ts->br_stack_sz - ts->br_stack_pos, (unsigned int)dst->nr);
memcpy(be, &src->entries[ts->br_stack_pos], bsz * nr);
if (src->nr >= ts->br_stack_sz) {
sz -= nr;
be = &dst->entries[nr];
nr = min(ts->br_stack_pos, sz);
memcpy(be, &src->entries[0], bsz * ts->br_stack_pos);
}
}
/* Start of user space branch entries */
static bool us_start(struct branch_entry *be, u64 kernel_start, bool *start)
{
if (!*start)
*start = be->to && be->to < kernel_start;
return *start;
}
/*
* Start of branch entries after the ip fell in between 2 branches, or user
* space branch entries.
*/
static bool ks_start(struct branch_entry *be, u64 sample_ip, u64 kernel_start,
bool *start, struct branch_entry *nb)
{
if (!*start) {
*start = (nb && sample_ip >= be->to && sample_ip <= nb->from) ||
be->from < kernel_start ||
(be->to && be->to < kernel_start);
}
return *start;
}
/*
* Hardware sample records, created some time after the event occurred, need to
* have subsequent addresses removed from the branch stack.
*/
void thread_stack__br_sample_late(struct thread *thread, int cpu,
struct branch_stack *dst, unsigned int sz,
u64 ip, u64 kernel_start)
{
struct thread_stack *ts = thread__stack(thread, cpu);
struct branch_entry *d, *s, *spos, *ssz;
struct branch_stack *src;
unsigned int nr = 0;
bool start = false;
dst->nr = 0;
if (!ts)
return;
src = ts->br_stack_rb;
if (!src->nr)
return;
spos = &src->entries[ts->br_stack_pos];
ssz = &src->entries[ts->br_stack_sz];
d = &dst->entries[0];
s = spos;
if (ip < kernel_start) {
/*
* User space sample: start copying branch entries when the
* branch is in user space.
*/
for (s = spos; s < ssz && nr < sz; s++) {
if (us_start(s, kernel_start, &start)) {
*d++ = *s;
nr += 1;
}
}
if (src->nr >= ts->br_stack_sz) {
for (s = &src->entries[0]; s < spos && nr < sz; s++) {
if (us_start(s, kernel_start, &start)) {
*d++ = *s;
nr += 1;
}
}
}
} else {
struct branch_entry *nb = NULL;
/*
* Kernel space sample: start copying branch entries when the ip
* falls in between 2 branches (or the branch is in user space
* because then the start must have been missed).
*/
for (s = spos; s < ssz && nr < sz; s++) {
if (ks_start(s, ip, kernel_start, &start, nb)) {
*d++ = *s;
nr += 1;
}
nb = s;
}
if (src->nr >= ts->br_stack_sz) {
for (s = &src->entries[0]; s < spos && nr < sz; s++) {
if (ks_start(s, ip, kernel_start, &start, nb)) {
*d++ = *s;
nr += 1;
}
nb = s;
}
}
}
dst->nr = nr;
}
struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
void *data)
{
struct call_return_processor *crp;
crp = zalloc(sizeof(struct call_return_processor));
if (!crp)
return NULL;
crp->cpr = call_path_root__new();
if (!crp->cpr)
goto out_free;
crp->process = process;
crp->data = data;
return crp;
out_free:
free(crp);
return NULL;
}
void call_return_processor__free(struct call_return_processor *crp)
{
if (crp) {
call_path_root__free(crp->cpr);
free(crp);
}
}
static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
u64 timestamp, u64 ref, struct call_path *cp,
bool no_call, bool trace_end)
{
struct thread_stack_entry *tse;
int err;
if (!cp)
return -ENOMEM;
if (ts->cnt == ts->sz) {
err = thread_stack__grow(ts);
if (err)
return err;
}
tse = &ts->stack[ts->cnt++];
tse->ret_addr = ret_addr;
tse->timestamp = timestamp;
tse->ref = ref;
tse->branch_count = ts->branch_count;
tse->insn_count = ts->insn_count;
tse->cyc_count = ts->cyc_count;
tse->cp = cp;
tse->no_call = no_call;
tse->trace_end = trace_end;
tse->non_call = false;
tse->db_id = 0;
return 0;
}
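/*
 * Pop stack entries to match a return to @ret_addr. If the matching entry is
 * further down the stack, the entries above it are reported as calls that did
 * not return. Returns 1 if no matching entry was found.
 */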
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
u64 ret_addr, u64 timestamp, u64 ref,
struct symbol *sym)
{
int err;
if (!ts->cnt)
return 1;
if (ts->cnt == 1) {
struct thread_stack_entry *tse = &ts->stack[0];
if (tse->cp->sym == sym)
return thread_stack__call_return(thread, ts, --ts->cnt,
timestamp, ref, false);
}
if (ts->stack[ts->cnt - 1].ret_addr == ret_addr &&
!ts->stack[ts->cnt - 1].non_call) {
return thread_stack__call_return(thread, ts, --ts->cnt,
timestamp, ref, false);
} else {
size_t i = ts->cnt - 1;
while (i--) {
if (ts->stack[i].ret_addr != ret_addr ||
ts->stack[i].non_call)
continue;
i += 1;
while (ts->cnt > i) {
err = thread_stack__call_return(thread, ts,
--ts->cnt,
timestamp, ref,
true);
if (err)
return err;
}
return thread_stack__call_return(thread, ts, --ts->cnt,
timestamp, ref, false);
}
}
return 1;
}
static int thread_stack__bottom(struct thread_stack *ts,
struct perf_sample *sample,
struct addr_location *from_al,
struct addr_location *to_al, u64 ref)
{
struct call_path_root *cpr = ts->crp->cpr;
struct call_path *cp;
struct symbol *sym;
u64 ip;
if (sample->ip) {
ip = sample->ip;
sym = from_al->sym;
} else if (sample->addr) {
ip = sample->addr;
sym = to_al->sym;
} else {
return 0;
}
cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
ts->kernel_start);
return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
true, false);
}
static int thread_stack__pop_ks(struct thread *thread, struct thread_stack *ts,
struct perf_sample *sample, u64 ref)
{
u64 tm = sample->time;
int err;
/* Return to userspace, so pop all kernel addresses */
while (thread_stack__in_kernel(ts)) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
tm, ref, true);
if (err)
return err;
}
return 0;
}
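/*
 * Handle a 'return' for which no matching 'call' was seen: reconstruct a
 * plausible stack by assuming the missing call happened before the trace
 * started, or by treating the return as a jump (e.g. a retpoline).
 */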
static int thread_stack__no_call_return(struct thread *thread,
struct thread_stack *ts,
struct perf_sample *sample,
struct addr_location *from_al,
struct addr_location *to_al, u64 ref)
{
struct call_path_root *cpr = ts->crp->cpr;
struct call_path *root = &cpr->call_path;
struct symbol *fsym = from_al->sym;
struct symbol *tsym = to_al->sym;
struct call_path *cp, *parent;
u64 ks = ts->kernel_start;
u64 addr = sample->addr;
u64 tm = sample->time;
u64 ip = sample->ip;
int err;
if (ip >= ks && addr < ks) {
/* Return to userspace, so pop all kernel addresses */
err = thread_stack__pop_ks(thread, ts, sample, ref);
if (err)
return err;
/* If the stack is empty, push the userspace address */
if (!ts->cnt) {
cp = call_path__findnew(cpr, root, tsym, addr, ks);
return thread_stack__push_cp(ts, 0, tm, ref, cp, true,
false);
}
} else if (thread_stack__in_kernel(ts) && ip < ks) {
/* Return to userspace, so pop all kernel addresses */
err = thread_stack__pop_ks(thread, ts, sample, ref);
if (err)
return err;
}
if (ts->cnt)
parent = ts->stack[ts->cnt - 1].cp;
else
parent = root;
if (parent->sym == from_al->sym) {
/*
* At the bottom of the stack, assume the missing 'call' was
* before the trace started. So, pop the current symbol and push
* the 'to' symbol.
*/
if (ts->cnt == 1) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
tm, ref, false);
if (err)
return err;
}
if (!ts->cnt) {
cp = call_path__findnew(cpr, root, tsym, addr, ks);
return thread_stack__push_cp(ts, addr, tm, ref, cp,
true, false);
}
/*
* Otherwise assume the 'return' is being used as a jump (e.g.
* retpoline) and just push the 'to' symbol.
*/
cp = call_path__findnew(cpr, parent, tsym, addr, ks);
err = thread_stack__push_cp(ts, 0, tm, ref, cp, true, false);
if (!err)
ts->stack[ts->cnt - 1].non_call = true;
return err;
}
/*
* Assume 'parent' has not yet returned, so push 'to', and then push and
* pop 'from'.
*/
cp = call_path__findnew(cpr, parent, tsym, addr, ks);
err = thread_stack__push_cp(ts, addr, tm, ref, cp, true, false);
if (err)
return err;
cp = call_path__findnew(cpr, cp, fsym, ip, ks);
err = thread_stack__push_cp(ts, ip, tm, ref, cp, true, false);
if (err)
return err;
return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
}
static int thread_stack__trace_begin(struct thread *thread,
struct thread_stack *ts, u64 timestamp,
u64 ref)
{
struct thread_stack_entry *tse;
int err;
if (!ts->cnt)
return 0;
/* Pop trace end */
tse = &ts->stack[ts->cnt - 1];
if (tse->trace_end) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
timestamp, ref, false);
if (err)
return err;
}
return 0;
}
static int thread_stack__trace_end(struct thread_stack *ts,
struct perf_sample *sample, u64 ref)
{
struct call_path_root *cpr = ts->crp->cpr;
struct call_path *cp;
u64 ret_addr;
/* No point having 'trace end' on the bottom of the stack */
if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
return 0;
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
ts->kernel_start);
ret_addr = sample->ip + sample->insn_len;
return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
false, true);
}
static bool is_x86_retpoline(const char *name)
{
return strstr(name, "__x86_indirect_thunk_") == name;
}
/*
* x86 retpoline functions pollute the call graph. This function removes them.
* This does not handle function return thunks, nor is there any improvement
* for the handling of inline thunks or extern thunks.
*/
static int thread_stack__x86_retpoline(struct thread_stack *ts,
struct perf_sample *sample,
struct addr_location *to_al)
{
struct thread_stack_entry *tse = &ts->stack[ts->cnt - 1];
struct call_path_root *cpr = ts->crp->cpr;
struct symbol *sym = tse->cp->sym;
struct symbol *tsym = to_al->sym;
struct call_path *cp;
if (sym && is_x86_retpoline(sym->name)) {
/*
		 * This is an x86 retpoline function. It pollutes the call graph by
* showing up everywhere there is an indirect branch, but does
* not itself mean anything. Here the top-of-stack is removed,
* by decrementing the stack count, and then further down, the
* resulting top-of-stack is replaced with the actual target.
* The result is that the retpoline functions will no longer
* appear in the call graph. Note this only affects the call
* graph, since all the original branches are left unchanged.
*/
ts->cnt -= 1;
sym = ts->stack[ts->cnt - 2].cp->sym;
if (sym && sym == tsym && to_al->addr != tsym->start) {
/*
* Target is back to the middle of the symbol we came
* from so assume it is an indirect jmp and forget it
* altogether.
*/
ts->cnt -= 1;
return 0;
}
} else if (sym && sym == tsym) {
/*
* Target is back to the symbol we came from so assume it is an
* indirect jmp and forget it altogether.
*/
ts->cnt -= 1;
return 0;
}
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 2].cp, tsym,
sample->addr, ts->kernel_start);
if (!cp)
return -ENOMEM;
/* Replace the top-of-stack with the actual target */
ts->stack[ts->cnt - 1].cp = cp;
return 0;
}
int thread_stack__process(struct thread *thread, struct comm *comm,
struct perf_sample *sample,
struct addr_location *from_al,
struct addr_location *to_al, u64 ref,
struct call_return_processor *crp)
{
struct thread_stack *ts = thread__stack(thread, sample->cpu);
enum retpoline_state_t rstate;
int err = 0;
if (ts && !ts->crp) {
/* Supersede thread_stack__event() */
thread_stack__reset(thread, ts);
ts = NULL;
}
if (!ts) {
ts = thread_stack__new(thread, sample->cpu, crp, true, 0);
if (!ts)
return -ENOMEM;
ts->comm = comm;
}
rstate = ts->rstate;
if (rstate == X86_RETPOLINE_DETECTED)
ts->rstate = X86_RETPOLINE_POSSIBLE;
/* Flush stack on exec */
if (ts->comm != comm && thread__pid(thread) == thread__tid(thread)) {
err = __thread_stack__flush(thread, ts);
if (err)
return err;
ts->comm = comm;
}
/* If the stack is empty, put the current symbol on the stack */
if (!ts->cnt) {
err = thread_stack__bottom(ts, sample, from_al, to_al, ref);
if (err)
return err;
}
ts->branch_count += 1;
ts->insn_count += sample->insn_cnt;
ts->cyc_count += sample->cyc_cnt;
ts->last_time = sample->time;
if (sample->flags & PERF_IP_FLAG_CALL) {
bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
struct call_path_root *cpr = ts->crp->cpr;
struct call_path *cp;
u64 ret_addr;
if (!sample->ip || !sample->addr)
return 0;
ret_addr = sample->ip + sample->insn_len;
if (ret_addr == sample->addr)
return 0; /* Zero-length calls are excluded */
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
to_al->sym, sample->addr,
ts->kernel_start);
err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
cp, false, trace_end);
/*
		 * A call to the same symbol, but not to the start of the
		 * symbol, may be the start of an x86 retpoline.
*/
if (!err && rstate == X86_RETPOLINE_POSSIBLE && to_al->sym &&
from_al->sym == to_al->sym &&
to_al->addr != to_al->sym->start)
ts->rstate = X86_RETPOLINE_DETECTED;
} else if (sample->flags & PERF_IP_FLAG_RETURN) {
if (!sample->addr) {
u32 return_from_kernel = PERF_IP_FLAG_SYSCALLRET |
PERF_IP_FLAG_INTERRUPT;
if (!(sample->flags & return_from_kernel))
return 0;
/* Pop kernel stack */
return thread_stack__pop_ks(thread, ts, sample, ref);
}
if (!sample->ip)
return 0;
/* x86 retpoline 'return' doesn't match the stack */
if (rstate == X86_RETPOLINE_DETECTED && ts->cnt > 2 &&
ts->stack[ts->cnt - 1].ret_addr != sample->addr)
return thread_stack__x86_retpoline(ts, sample, to_al);
err = thread_stack__pop_cp(thread, ts, sample->addr,
sample->time, ref, from_al->sym);
if (err) {
if (err < 0)
return err;
err = thread_stack__no_call_return(thread, ts, sample,
from_al, to_al, ref);
}
} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
err = thread_stack__trace_begin(thread, ts, sample->time, ref);
} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
err = thread_stack__trace_end(ts, sample, ref);
} else if (sample->flags & PERF_IP_FLAG_BRANCH &&
from_al->sym != to_al->sym && to_al->sym &&
to_al->addr == to_al->sym->start) {
struct call_path_root *cpr = ts->crp->cpr;
struct call_path *cp;
/*
* The compiler might optimize a call/ret combination by making
* it a jmp. Make that visible by recording on the stack a
* branch to the start of a different symbol. Note, that means
* when a ret pops the stack, all jmps must be popped off first.
*/
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
to_al->sym, sample->addr,
ts->kernel_start);
err = thread_stack__push_cp(ts, 0, sample->time, ref, cp, false,
false);
if (!err)
ts->stack[ts->cnt - 1].non_call = true;
}
return err;
}
size_t thread_stack__depth(struct thread *thread, int cpu)
{
struct thread_stack *ts = thread__stack(thread, cpu);
if (!ts)
return 0;
return ts->cnt;
}
| linux-master | tools/perf/util/thread-stack.c |
// SPDX-License-Identifier: GPL-2.0
#include "sharded_mutex.h"
#include <stdlib.h>
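/*
 * Allocate a sharded_mutex with at least num_shards mutexes. The count is
 * rounded up to the next power of two and recorded in cap_bits, so a hash
 * value can be reduced to a shard index cheaply.
 */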
struct sharded_mutex *sharded_mutex__new(size_t num_shards)
{
struct sharded_mutex *result;
size_t size;
unsigned int bits;
for (bits = 0; ((size_t)1 << bits) < num_shards; bits++)
;
size = sizeof(*result) + sizeof(struct mutex) * (1 << bits);
result = malloc(size);
if (!result)
return NULL;
result->cap_bits = bits;
for (size_t i = 0; i < ((size_t)1 << bits); i++)
mutex_init(&result->mutexes[i]);
return result;
}
void sharded_mutex__delete(struct sharded_mutex *sm)
{
for (size_t i = 0; i < ((size_t)1 << sm->cap_bits); i++)
mutex_destroy(&sm->mutexes[i]);
free(sm);
}
| linux-master | tools/perf/util/sharded_mutex.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <asm/bug.h>
#include <dirent.h>
#include "data.h"
#include "util.h" // rm_rf_perf_data()
#include "debug.h"
#include "header.h"
#include <internal/lib.h>
static void close_dir(struct perf_data_file *files, int nr)
{
while (--nr >= 0) {
close(files[nr].fd);
zfree(&files[nr].path);
}
free(files);
}
void perf_data__close_dir(struct perf_data *data)
{
close_dir(data->dir.files, data->dir.nr);
}
int perf_data__create_dir(struct perf_data *data, int nr)
{
struct perf_data_file *files = NULL;
int i, ret;
if (WARN_ON(!data->is_dir))
return -EINVAL;
files = zalloc(nr * sizeof(*files));
if (!files)
return -ENOMEM;
for (i = 0; i < nr; i++) {
struct perf_data_file *file = &files[i];
ret = asprintf(&file->path, "%s/data.%d", data->path, i);
if (ret < 0) {
ret = -ENOMEM;
goto out_err;
}
ret = open(file->path, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR);
if (ret < 0) {
ret = -errno;
goto out_err;
}
file->fd = ret;
}
data->dir.version = PERF_DIR_VERSION;
data->dir.files = files;
data->dir.nr = nr;
return 0;
out_err:
close_dir(files, i);
return ret;
}
int perf_data__open_dir(struct perf_data *data)
{
struct perf_data_file *files = NULL;
struct dirent *dent;
int ret = -1;
DIR *dir;
int nr = 0;
	/*
	 * A directory containing a single regular perf data file which is
	 * already open means there is nothing more to do here.
	 */
if (perf_data__is_single_file(data))
return 0;
if (WARN_ON(!data->is_dir))
return -EINVAL;
/* The version is provided by DIR_FORMAT feature. */
if (WARN_ON(data->dir.version != PERF_DIR_VERSION))
return -1;
dir = opendir(data->path);
if (!dir)
return -EINVAL;
while ((dent = readdir(dir)) != NULL) {
struct perf_data_file *file;
char path[PATH_MAX];
struct stat st;
snprintf(path, sizeof(path), "%s/%s", data->path, dent->d_name);
if (stat(path, &st))
continue;
if (!S_ISREG(st.st_mode) || strncmp(dent->d_name, "data.", 5))
continue;
ret = -ENOMEM;
file = realloc(files, (nr + 1) * sizeof(*files));
if (!file)
goto out_err;
files = file;
file = &files[nr++];
file->path = strdup(path);
if (!file->path)
goto out_err;
ret = open(file->path, O_RDONLY);
if (ret < 0)
goto out_err;
file->fd = ret;
file->size = st.st_size;
}
closedir(dir);
if (!files)
return -EINVAL;
data->dir.files = files;
data->dir.nr = nr;
return 0;
out_err:
closedir(dir);
close_dir(files, nr);
return ret;
}
int perf_data__update_dir(struct perf_data *data)
{
int i;
if (WARN_ON(!data->is_dir))
return -EINVAL;
for (i = 0; i < data->dir.nr; i++) {
struct perf_data_file *file = &data->dir.files[i];
struct stat st;
if (fstat(file->fd, &st))
return -1;
file->size = st.st_size;
}
return 0;
}
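/*
 * Detect whether the data is a pipe: either no path was given and the
 * relevant stdio fd is a FIFO, or the path is "-". For pipes, the fd is
 * optionally wrapped in a stdio FILE when use_stdio was requested.
 */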
static bool check_pipe(struct perf_data *data)
{
struct stat st;
bool is_pipe = false;
int fd = perf_data__is_read(data) ?
STDIN_FILENO : STDOUT_FILENO;
if (!data->path) {
if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
is_pipe = true;
} else {
if (!strcmp(data->path, "-"))
is_pipe = true;
}
if (is_pipe) {
if (data->use_stdio) {
const char *mode;
mode = perf_data__is_read(data) ? "r" : "w";
data->file.fptr = fdopen(fd, mode);
if (data->file.fptr == NULL) {
data->file.fd = fd;
data->use_stdio = false;
}
} else {
data->file.fd = fd;
}
}
return data->is_pipe = is_pipe;
}
static int check_backup(struct perf_data *data)
{
struct stat st;
if (perf_data__is_read(data))
return 0;
if (!stat(data->path, &st) && st.st_size) {
char oldname[PATH_MAX];
int ret;
snprintf(oldname, sizeof(oldname), "%s.old",
data->path);
ret = rm_rf_perf_data(oldname);
if (ret) {
pr_err("Can't remove old data: %s (%s)\n",
ret == -2 ?
"Unknown file found" : strerror(errno),
oldname);
return -1;
}
if (rename(data->path, oldname)) {
pr_err("Can't move data: %s (%s to %s)\n",
strerror(errno),
data->path, oldname);
return -1;
}
}
return 0;
}
static bool is_dir(struct perf_data *data)
{
struct stat st;
if (stat(data->path, &st))
return false;
return (st.st_mode & S_IFMT) == S_IFDIR;
}
static int open_file_read(struct perf_data *data)
{
int flags = data->in_place_update ? O_RDWR : O_RDONLY;
struct stat st;
int fd;
char sbuf[STRERR_BUFSIZE];
fd = open(data->file.path, flags);
if (fd < 0) {
int err = errno;
pr_err("failed to open %s: %s", data->file.path,
str_error_r(err, sbuf, sizeof(sbuf)));
if (err == ENOENT && !strcmp(data->file.path, "perf.data"))
pr_err(" (try 'perf record' first)");
pr_err("\n");
return -err;
}
if (fstat(fd, &st) < 0)
goto out_close;
if (!data->force && st.st_uid && (st.st_uid != geteuid())) {
pr_err("File %s not owned by current user or root (use -f to override)\n",
data->file.path);
goto out_close;
}
if (!st.st_size) {
pr_info("zero-sized data (%s), nothing to do!\n",
data->file.path);
goto out_close;
}
data->file.size = st.st_size;
return fd;
out_close:
close(fd);
return -1;
}
static int open_file_write(struct perf_data *data)
{
int fd;
char sbuf[STRERR_BUFSIZE];
fd = open(data->file.path, O_CREAT|O_RDWR|O_TRUNC|O_CLOEXEC,
S_IRUSR|S_IWUSR);
if (fd < 0)
pr_err("failed to open %s : %s\n", data->file.path,
str_error_r(errno, sbuf, sizeof(sbuf)));
return fd;
}
static int open_file(struct perf_data *data)
{
int fd;
fd = perf_data__is_read(data) ?
open_file_read(data) : open_file_write(data);
if (fd < 0) {
zfree(&data->file.path);
return -1;
}
data->file.fd = fd;
return 0;
}
static int open_file_dup(struct perf_data *data)
{
data->file.path = strdup(data->path);
if (!data->file.path)
return -ENOMEM;
return open_file(data);
}
static int open_dir(struct perf_data *data)
{
int ret;
/*
* So far we open only the header, so we can read the data version and
* layout.
*/
if (asprintf(&data->file.path, "%s/data", data->path) < 0)
return -1;
if (perf_data__is_write(data) &&
mkdir(data->path, S_IRWXU) < 0)
return -1;
ret = open_file(data);
/* Cleanup whatever we managed to create so far. */
if (ret && perf_data__is_write(data))
rm_rf_perf_data(data->path);
return ret;
}
int perf_data__open(struct perf_data *data)
{
if (check_pipe(data))
return 0;
	/* stdio is currently supported only for pipes */
data->use_stdio = false;
if (!data->path)
data->path = "perf.data";
if (check_backup(data))
return -1;
if (perf_data__is_read(data))
data->is_dir = is_dir(data);
return perf_data__is_dir(data) ?
open_dir(data) : open_file_dup(data);
}
void perf_data__close(struct perf_data *data)
{
if (perf_data__is_dir(data))
perf_data__close_dir(data);
zfree(&data->file.path);
if (data->use_stdio)
fclose(data->file.fptr);
else
close(data->file.fd);
}
ssize_t perf_data__read(struct perf_data *data, void *buf, size_t size)
{
if (data->use_stdio) {
if (fread(buf, size, 1, data->file.fptr) == 1)
return size;
return feof(data->file.fptr) ? 0 : -1;
}
return readn(data->file.fd, buf, size);
}
ssize_t perf_data_file__write(struct perf_data_file *file,
void *buf, size_t size)
{
return writen(file->fd, buf, size);
}
ssize_t perf_data__write(struct perf_data *data,
void *buf, size_t size)
{
if (data->use_stdio) {
if (fwrite(buf, size, 1, data->file.fptr) == 1)
return size;
return -1;
}
return perf_data_file__write(&data->file, buf, size);
}
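/*
 * Switch output to a new file: rename the current data file to
 * "<path>.<postfix>" and, unless called at exit, reopen the original path and
 * seek to @pos so that recording can continue. Returns the output fd on
 * success.
 */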
int perf_data__switch(struct perf_data *data,
const char *postfix,
size_t pos, bool at_exit,
char **new_filepath)
{
int ret;
if (check_pipe(data))
return -EINVAL;
if (perf_data__is_read(data))
return -EINVAL;
if (asprintf(new_filepath, "%s.%s", data->path, postfix) < 0)
return -ENOMEM;
	/*
	 * Only fire a warning, don't return an error; continue filling the
	 * original file.
	 */
if (rename(data->path, *new_filepath))
pr_warning("Failed to rename %s to %s\n", data->path, *new_filepath);
if (!at_exit) {
close(data->file.fd);
ret = perf_data__open(data);
if (ret < 0)
goto out;
if (lseek(data->file.fd, pos, SEEK_SET) == (off_t)-1) {
ret = -errno;
pr_debug("Failed to lseek to %zu: %s",
pos, strerror(errno));
goto out;
}
}
ret = data->file.fd;
out:
return ret;
}
unsigned long perf_data__size(struct perf_data *data)
{
u64 size = data->file.size;
int i;
if (perf_data__is_single_file(data))
return size;
for (i = 0; i < data->dir.nr; i++) {
struct perf_data_file *file = &data->dir.files[i];
size += file->size;
}
return size;
}
int perf_data__make_kcore_dir(struct perf_data *data, char *buf, size_t buf_sz)
{
int ret;
if (!data->is_dir)
return -1;
ret = snprintf(buf, buf_sz, "%s/kcore_dir", data->path);
if (ret < 0 || (size_t)ret >= buf_sz)
return -1;
return mkdir(buf, S_IRWXU);
}
bool has_kcore_dir(const char *path)
{
struct dirent *d = ERR_PTR(-EINVAL);
const char *name = "kcore_dir";
DIR *dir = opendir(path);
size_t n = strlen(name);
bool result = false;
if (dir) {
while (d && !result) {
d = readdir(dir);
			result = d ? strncmp(d->d_name, name, n) == 0 : false;
}
closedir(dir);
}
return result;
}
char *perf_data__kallsyms_name(struct perf_data *data)
{
char *kallsyms_name;
struct stat st;
if (!data->is_dir)
return NULL;
if (asprintf(&kallsyms_name, "%s/kcore_dir/kallsyms", data->path) < 0)
return NULL;
if (stat(kallsyms_name, &st)) {
free(kallsyms_name);
return NULL;
}
return kallsyms_name;
}
char *perf_data__guest_kallsyms_name(struct perf_data *data, pid_t machine_pid)
{
char *kallsyms_name;
struct stat st;
if (!data->is_dir)
return NULL;
if (asprintf(&kallsyms_name, "%s/kcore_dir__%d/kallsyms", data->path, machine_pid) < 0)
return NULL;
if (stat(kallsyms_name, &st)) {
free(kallsyms_name);
return NULL;
}
return kallsyms_name;
}
bool is_perf_data(const char *path)
{
bool ret = false;
FILE *file;
u64 magic;
file = fopen(path, "r");
if (!file)
return false;
if (fread(&magic, 1, 8, file) < 8)
goto out;
ret = is_perf_magic(magic);
out:
fclose(file);
return ret;
}
| linux-master | tools/perf/util/data.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>
#include "bpf_counter.h"
#include "bpf-utils.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cgroup.h"
#include "cpumap.h"
#include "thread_map.h"
#include "bpf_skel/bpf_prog_profiler.skel.h"
#include "bpf_skel/bperf_u.h"
#include "bpf_skel/bperf_leader.skel.h"
#include "bpf_skel/bperf_follower.skel.h"
#define ATTR_MAP_SIZE 16
static inline void *u64_to_ptr(__u64 ptr)
{
return (void *)(unsigned long)ptr;
}
static struct bpf_counter *bpf_counter_alloc(void)
{
struct bpf_counter *counter;
counter = zalloc(sizeof(*counter));
if (counter)
INIT_LIST_HEAD(&counter->list);
return counter;
}
static int bpf_program_profiler__destroy(struct evsel *evsel)
{
struct bpf_counter *counter, *tmp;
list_for_each_entry_safe(counter, tmp,
&evsel->bpf_counter_list, list) {
list_del_init(&counter->list);
bpf_prog_profiler_bpf__destroy(counter->skel);
free(counter);
}
assert(list_empty(&evsel->bpf_counter_list));
return 0;
}
static char *bpf_target_prog_name(int tgt_fd)
{
struct bpf_func_info *func_info;
struct perf_bpil *info_linear;
const struct btf_type *t;
struct btf *btf = NULL;
char *name = NULL;
info_linear = get_bpf_prog_info_linear(tgt_fd, 1UL << PERF_BPIL_FUNC_INFO);
if (IS_ERR_OR_NULL(info_linear)) {
pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
return NULL;
}
if (info_linear->info.btf_id == 0) {
pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
goto out;
}
btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
if (libbpf_get_error(btf)) {
pr_debug("failed to load btf for prog FD %d\n", tgt_fd);
goto out;
}
func_info = u64_to_ptr(info_linear->info.func_info);
t = btf__type_by_id(btf, func_info[0].type_id);
if (!t) {
pr_debug("btf %d doesn't have type %d\n",
info_linear->info.btf_id, func_info[0].type_id);
goto out;
}
name = strdup(btf__name_by_offset(btf, t->name_off));
out:
btf__free(btf);
free(info_linear);
return name;
}
static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
{
struct bpf_prog_profiler_bpf *skel;
struct bpf_counter *counter;
struct bpf_program *prog;
	char *prog_name = NULL;
int prog_fd;
int err;
prog_fd = bpf_prog_get_fd_by_id(prog_id);
if (prog_fd < 0) {
pr_err("Failed to open fd for bpf prog %u\n", prog_id);
return -1;
}
counter = bpf_counter_alloc();
if (!counter) {
close(prog_fd);
return -1;
}
skel = bpf_prog_profiler_bpf__open();
if (!skel) {
pr_err("Failed to open bpf skeleton\n");
goto err_out;
}
skel->rodata->num_cpu = evsel__nr_cpus(evsel);
bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
bpf_map__set_max_entries(skel->maps.accum_readings, 1);
prog_name = bpf_target_prog_name(prog_fd);
if (!prog_name) {
pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
goto err_out;
}
bpf_object__for_each_program(prog, skel->obj) {
err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
if (err) {
pr_err("bpf_program__set_attach_target failed.\n"
"Does bpf prog %u have BTF?\n", prog_id);
goto err_out;
}
}
set_max_rlimit();
err = bpf_prog_profiler_bpf__load(skel);
if (err) {
pr_err("bpf_prog_profiler_bpf__load failed\n");
goto err_out;
}
assert(skel != NULL);
counter->skel = skel;
list_add(&counter->list, &evsel->bpf_counter_list);
	free(prog_name);
	close(prog_fd);
	return 0;
err_out:
	bpf_prog_profiler_bpf__destroy(skel);
	free(prog_name);
	free(counter);
	close(prog_fd);
	return -1;
}
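/*
 * target->bpf_str holds a comma-separated list of BPF program ids; load one
 * profiler skeleton per id. The skeletons are attached later in
 * bpf_program_profiler__enable().
 */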
static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
{
char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
u32 prog_id;
int ret;
bpf_str_ = bpf_str = strdup(target->bpf_str);
if (!bpf_str)
return -1;
while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
prog_id = strtoul(tok, &p, 10);
if (prog_id == 0 || prog_id == UINT_MAX ||
(*p != '\0' && *p != ',')) {
pr_err("Failed to parse bpf prog ids %s\n",
target->bpf_str);
return -1;
}
ret = bpf_program_profiler_load_one(evsel, prog_id);
if (ret) {
bpf_program_profiler__destroy(evsel);
free(bpf_str_);
return -1;
}
bpf_str = NULL;
}
free(bpf_str_);
return 0;
}
static int bpf_program_profiler__enable(struct evsel *evsel)
{
struct bpf_counter *counter;
int ret;
list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
assert(counter->skel != NULL);
ret = bpf_prog_profiler_bpf__attach(counter->skel);
if (ret) {
bpf_program_profiler__destroy(evsel);
return ret;
}
}
return 0;
}
static int bpf_program_profiler__disable(struct evsel *evsel)
{
struct bpf_counter *counter;
list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
assert(counter->skel != NULL);
bpf_prog_profiler_bpf__detach(counter->skel);
}
return 0;
}
static int bpf_program_profiler__read(struct evsel *evsel)
{
// BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
// Sometimes possible > online, like on a Ryzen 3900X that has 24
// threads but its possible showed 0-31 -acme
int num_cpu_bpf = libbpf_num_possible_cpus();
struct bpf_perf_event_value values[num_cpu_bpf];
struct bpf_counter *counter;
struct perf_counts_values *counts;
int reading_map_fd;
__u32 key = 0;
int err, idx, bpf_cpu;
if (list_empty(&evsel->bpf_counter_list))
return -EAGAIN;
perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
counts = perf_counts(evsel->counts, idx, 0);
counts->val = 0;
counts->ena = 0;
counts->run = 0;
}
list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
struct bpf_prog_profiler_bpf *skel = counter->skel;
assert(skel != NULL);
reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
err = bpf_map_lookup_elem(reading_map_fd, &key, values);
if (err) {
pr_err("failed to read value\n");
return err;
}
for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
idx = perf_cpu_map__idx(evsel__cpus(evsel),
(struct perf_cpu){.cpu = bpf_cpu});
if (idx == -1)
continue;
counts = perf_counts(evsel->counts, idx, 0);
counts->val += values[bpf_cpu].counter;
counts->ena += values[bpf_cpu].enabled;
counts->run += values[bpf_cpu].running;
}
}
return 0;
}
static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx,
int fd)
{
struct bpf_prog_profiler_bpf *skel;
struct bpf_counter *counter;
int ret;
list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
skel = counter->skel;
assert(skel != NULL);
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
&cpu_map_idx, &fd, BPF_ANY);
if (ret)
return ret;
}
return 0;
}
struct bpf_counter_ops bpf_program_profiler_ops = {
.load = bpf_program_profiler__load,
.enable = bpf_program_profiler__enable,
.disable = bpf_program_profiler__disable,
.read = bpf_program_profiler__read,
.destroy = bpf_program_profiler__destroy,
.install_pe = bpf_program_profiler__install_pe,
};
static bool bperf_attr_map_compatible(int attr_map_fd)
{
struct bpf_map_info map_info = {0};
__u32 map_info_len = sizeof(map_info);
int err;
err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);
if (err)
return false;
return (map_info.key_size == sizeof(struct perf_event_attr)) &&
(map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}
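/*
 * Open (or create and pin) the shared attr -> {link_id, diff_map_id} map,
 * either at target->attr_map or at the default path under the bpf
 * filesystem, and take an exclusive flock() on it.
 */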
static int bperf_lock_attr_map(struct target *target)
{
char path[PATH_MAX];
int map_fd, err;
if (target->attr_map) {
scnprintf(path, PATH_MAX, "%s", target->attr_map);
} else {
scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
BPF_PERF_DEFAULT_ATTR_MAP_PATH);
}
if (access(path, F_OK)) {
map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
sizeof(struct perf_event_attr),
sizeof(struct perf_event_attr_map_entry),
ATTR_MAP_SIZE, NULL);
if (map_fd < 0)
return -1;
err = bpf_obj_pin(map_fd, path);
if (err) {
/* someone pinned the map in parallel? */
close(map_fd);
map_fd = bpf_obj_get(path);
if (map_fd < 0)
return -1;
}
} else {
map_fd = bpf_obj_get(path);
if (map_fd < 0)
return -1;
}
if (!bperf_attr_map_compatible(map_fd)) {
close(map_fd);
return -1;
}
err = flock(map_fd, LOCK_EX);
if (err) {
close(map_fd);
return -1;
}
return map_fd;
}
static int bperf_check_target(struct evsel *evsel,
struct target *target,
enum bperf_filter_type *filter_type,
__u32 *filter_entry_cnt)
{
if (evsel->core.leader->nr_members > 1) {
pr_err("bpf managed perf events do not yet support groups.\n");
return -1;
}
/* determine filter type based on target */
if (target->system_wide) {
*filter_type = BPERF_FILTER_GLOBAL;
*filter_entry_cnt = 1;
} else if (target->cpu_list) {
*filter_type = BPERF_FILTER_CPU;
*filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
} else if (target->tid) {
*filter_type = BPERF_FILTER_PID;
*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
} else if (target->pid || evsel->evlist->workload.pid != -1) {
*filter_type = BPERF_FILTER_TGID;
*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
} else {
pr_err("bpf managed perf events do not yet support these targets.\n");
return -1;
}
return 0;
}
static struct perf_cpu_map *all_cpu_map;
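/*
 * (Re)create the shared leader: load and attach the leader skeleton, publish
 * its link id and diff_readings map id in the pinned attr map, then open the
 * events on all CPUs so install_pe can wire the fds into the leader's maps.
 */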
static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
struct perf_event_attr_map_entry *entry)
{
struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
int link_fd, diff_map_fd, err;
struct bpf_link *link = NULL;
if (!skel) {
pr_err("Failed to open leader skeleton\n");
return -1;
}
bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());
err = bperf_leader_bpf__load(skel);
if (err) {
pr_err("Failed to load leader skeleton\n");
goto out;
}
link = bpf_program__attach(skel->progs.on_switch);
if (IS_ERR(link)) {
pr_err("Failed to attach leader program\n");
err = PTR_ERR(link);
goto out;
}
link_fd = bpf_link__fd(link);
diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
entry->link_id = bpf_link_get_id(link_fd);
entry->diff_map_id = bpf_map_get_id(diff_map_fd);
err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
assert(err == 0);
evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
assert(evsel->bperf_leader_link_fd >= 0);
/*
	 * Save leader_skel for install_pe, which is called from within
	 * the following evsel__open_per_cpu() call.
*/
evsel->leader_skel = skel;
evsel__open_per_cpu(evsel, all_cpu_map, -1);
out:
bperf_leader_bpf__destroy(skel);
bpf_link__destroy(link);
return err;
}
static int bperf__load(struct evsel *evsel, struct target *target)
{
struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
int attr_map_fd, diff_map_fd = -1, err;
enum bperf_filter_type filter_type;
__u32 filter_entry_cnt, i;
if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
return -1;
if (!all_cpu_map) {
all_cpu_map = perf_cpu_map__new(NULL);
if (!all_cpu_map)
return -1;
}
evsel->bperf_leader_prog_fd = -1;
evsel->bperf_leader_link_fd = -1;
/*
	 * Step 1: hold a fd on the leader program and the bpf_link. If the
	 * program is already gone, reload it.
	 * Use flock() to ensure exclusive access to the perf_event_attr
	 * map.
*/
attr_map_fd = bperf_lock_attr_map(target);
if (attr_map_fd < 0) {
pr_err("Failed to lock perf_event_attr map\n");
return -1;
}
err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
if (err) {
err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
if (err)
goto out;
}
evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
if (evsel->bperf_leader_link_fd < 0 &&
bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
err = -1;
goto out;
}
/*
* The bpf_link holds reference to the leader program, and the
* leader program holds reference to the maps. Therefore, if
* link_id is valid, diff_map_id should also be valid.
*/
evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
assert(evsel->bperf_leader_prog_fd >= 0);
diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
assert(diff_map_fd >= 0);
/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate readings. Check
	 * whether the kernel supports it.
*/
err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
if (err) {
pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
"Therefore, --use-bpf might show inaccurate readings\n");
goto out;
}
/* Step 2: load the follower skeleton */
evsel->follower_skel = bperf_follower_bpf__open();
if (!evsel->follower_skel) {
err = -1;
pr_err("Failed to open follower skeleton\n");
goto out;
}
/* attach fexit program to the leader program */
bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
evsel->bperf_leader_prog_fd, "on_switch");
/* connect to leader diff_reading map */
bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);
/* set up reading map */
bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
filter_entry_cnt);
/* set up follower filter based on target */
bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
filter_entry_cnt);
err = bperf_follower_bpf__load(evsel->follower_skel);
if (err) {
pr_err("Failed to load follower skeleton\n");
bperf_follower_bpf__destroy(evsel->follower_skel);
evsel->follower_skel = NULL;
goto out;
}
for (i = 0; i < filter_entry_cnt; i++) {
int filter_map_fd;
__u32 key;
if (filter_type == BPERF_FILTER_PID ||
filter_type == BPERF_FILTER_TGID)
key = perf_thread_map__pid(evsel->core.threads, i);
else if (filter_type == BPERF_FILTER_CPU)
key = perf_cpu_map__cpu(evsel->core.cpus, i).cpu;
else
break;
filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
}
evsel->follower_skel->bss->type = filter_type;
err = bperf_follower_bpf__attach(evsel->follower_skel);
out:
if (err && evsel->bperf_leader_link_fd >= 0)
close(evsel->bperf_leader_link_fd);
if (err && evsel->bperf_leader_prog_fd >= 0)
close(evsel->bperf_leader_prog_fd);
if (diff_map_fd >= 0)
close(diff_map_fd);
flock(attr_map_fd, LOCK_UN);
close(attr_map_fd);
return err;
}
static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
struct bperf_leader_bpf *skel = evsel->leader_skel;
return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
&cpu_map_idx, &fd, BPF_ANY);
}
/*
 * Trigger the leader prog on each cpu, so that the accum_readings map gets
 * the latest readings.
*/
static int bperf_sync_counters(struct evsel *evsel)
{
int num_cpu, i, cpu;
num_cpu = perf_cpu_map__nr(all_cpu_map);
for (i = 0; i < num_cpu; i++) {
cpu = perf_cpu_map__cpu(all_cpu_map, i).cpu;
bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
}
return 0;
}
static int bperf__enable(struct evsel *evsel)
{
evsel->follower_skel->bss->enabled = 1;
return 0;
}
static int bperf__disable(struct evsel *evsel)
{
evsel->follower_skel->bss->enabled = 0;
return 0;
}
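/*
 * Trigger the leader program on every CPU so accum_readings is up to date,
 * then aggregate the per-CPU values according to the follower's filter type.
 */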
static int bperf__read(struct evsel *evsel)
{
struct bperf_follower_bpf *skel = evsel->follower_skel;
__u32 num_cpu_bpf = cpu__max_cpu().cpu;
struct bpf_perf_event_value values[num_cpu_bpf];
struct perf_counts_values *counts;
int reading_map_fd, err = 0;
__u32 i;
int j;
bperf_sync_counters(evsel);
reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
struct perf_cpu entry;
__u32 cpu;
err = bpf_map_lookup_elem(reading_map_fd, &i, values);
if (err)
goto out;
switch (evsel->follower_skel->bss->type) {
case BPERF_FILTER_GLOBAL:
assert(i == 0);
perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
counts = perf_counts(evsel->counts, j, 0);
counts->val = values[entry.cpu].counter;
counts->ena = values[entry.cpu].enabled;
counts->run = values[entry.cpu].running;
}
break;
case BPERF_FILTER_CPU:
cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
assert(cpu >= 0);
counts = perf_counts(evsel->counts, i, 0);
counts->val = values[cpu].counter;
counts->ena = values[cpu].enabled;
counts->run = values[cpu].running;
break;
case BPERF_FILTER_PID:
case BPERF_FILTER_TGID:
counts = perf_counts(evsel->counts, 0, i);
counts->val = 0;
counts->ena = 0;
counts->run = 0;
for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
counts->val += values[cpu].counter;
counts->ena += values[cpu].enabled;
counts->run += values[cpu].running;
}
break;
default:
break;
}
}
out:
return err;
}
static int bperf__destroy(struct evsel *evsel)
{
bperf_follower_bpf__destroy(evsel->follower_skel);
close(evsel->bperf_leader_prog_fd);
close(evsel->bperf_leader_link_fd);
return 0;
}
/*
* bperf: share hardware PMCs with BPF
*
* perf uses performance monitoring counters (PMC) to monitor system
* performance. The PMCs are limited hardware resources. For example,
* Intel CPUs have 3x fixed PMCs and 4x programmable PMCs per cpu.
*
* Modern data center systems use these PMCs in many different ways:
* system level monitoring, (maybe nested) container level monitoring, per
* process monitoring, profiling (in sample mode), etc. In some cases,
* there are more active perf_events than available hardware PMCs. To allow
* all perf_events to have a chance to run, it is necessary to do expensive
* time multiplexing of events.
*
* On the other hand, many monitoring tools count the common metrics
* (cycles, instructions). It is a waste to have multiple tools create
* multiple perf_events of "cycles" and occupy multiple PMCs.
*
 * bperf tries to reduce such waste by allowing multiple perf_events of
 * "cycles" or "instructions" (at different scopes) to share PMUs. Instead
 * of having each perf-stat session read its own perf_events, bperf uses
 * BPF programs to read the perf_events and aggregate the readings into BPF
 * maps. Then, the perf-stat session(s) read the values from these BPF maps.
*
* ||
* shared progs and maps <- || -> per session progs and maps
* ||
* --------------- ||
* | perf_events | ||
* --------------- fexit || -----------------
* | --------||----> | follower prog |
* --------------- / || --- -----------------
* cs -> | leader prog |/ ||/ | |
* --> --------------- /|| -------------- ------------------
* / | | / || | filter map | | accum_readings |
* / ------------ ------------ || -------------- ------------------
* | | prev map | | diff map | || |
* | ------------ ------------ || |
* \ || |
* = \ ==================================================== | ============
* \ / user space
* \ /
* \ /
* BPF_PROG_TEST_RUN BPF_MAP_LOOKUP_ELEM
* \ /
* \ /
* \------ perf-stat ----------------------/
*
* The figure above shows the architecture of bperf. Note that the figure
* is divided into 3 regions: shared progs and maps (top left), per session
* progs and maps (top right), and user space (bottom).
*
* The leader prog is triggered on each context switch (cs). The leader
* prog reads perf_events and stores the difference (current_reading -
* previous_reading) to the diff map. For the same metric, e.g. "cycles",
* multiple perf-stat sessions share the same leader prog.
*
* Each perf-stat session creates a follower prog as fexit program to the
* leader prog. It is possible to attach up to BPF_MAX_TRAMP_PROGS (38)
* follower progs to the same leader prog. The follower prog checks current
* task and processor ID to decide whether to add the value from the diff
* map to its accumulated reading map (accum_readings).
*
 * Finally, perf-stat user space reads the value from the accum_readings map.
*
* Besides context switch, it is also necessary to trigger the leader prog
* before perf-stat reads the value. Otherwise, the accum_reading map may
* not have the latest reading from the perf_events. This is achieved by
* triggering the event via sys_bpf(BPF_PROG_TEST_RUN) to each CPU.
*
* Comment before the definition of struct perf_event_attr_map_entry
* describes how different sessions of perf-stat share information about
* the leader prog.
*/
struct bpf_counter_ops bperf_ops = {
.load = bperf__load,
.enable = bperf__enable,
.disable = bperf__disable,
.read = bperf__read,
.install_pe = bperf__install_pe,
.destroy = bperf__destroy,
};
extern struct bpf_counter_ops bperf_cgrp_ops;
static inline bool bpf_counter_skip(struct evsel *evsel)
{
return evsel->bpf_counter_ops == NULL;
}
int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
if (bpf_counter_skip(evsel))
return 0;
return evsel->bpf_counter_ops->install_pe(evsel, cpu_map_idx, fd);
}
int bpf_counter__load(struct evsel *evsel, struct target *target)
{
if (target->bpf_str)
evsel->bpf_counter_ops = &bpf_program_profiler_ops;
else if (cgrp_event_expanded && target->use_bpf)
evsel->bpf_counter_ops = &bperf_cgrp_ops;
else if (target->use_bpf || evsel->bpf_counter ||
evsel__match_bpf_counter_events(evsel->name))
evsel->bpf_counter_ops = &bperf_ops;
if (evsel->bpf_counter_ops)
return evsel->bpf_counter_ops->load(evsel, target);
return 0;
}
int bpf_counter__enable(struct evsel *evsel)
{
if (bpf_counter_skip(evsel))
return 0;
return evsel->bpf_counter_ops->enable(evsel);
}
int bpf_counter__disable(struct evsel *evsel)
{
if (bpf_counter_skip(evsel))
return 0;
return evsel->bpf_counter_ops->disable(evsel);
}
int bpf_counter__read(struct evsel *evsel)
{
if (bpf_counter_skip(evsel))
return -EAGAIN;
return evsel->bpf_counter_ops->read(evsel);
}
void bpf_counter__destroy(struct evsel *evsel)
{
if (bpf_counter_skip(evsel))
return;
evsel->bpf_counter_ops->destroy(evsel);
evsel->bpf_counter_ops = NULL;
evsel->bpf_skel = NULL;
}
| linux-master | tools/perf/util/bpf_counter.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Manage a cache of file names' existence */
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <linux/list.h>
#include "fncache.h"
struct fncache {
struct hlist_node nd;
bool res;
char name[];
};
#define FNHSIZE 61
static struct hlist_head fncache_hash[FNHSIZE];
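/*
 * Simple multiplicative string hash; the result is reduced modulo FNHSIZE to
 * pick a bucket, and collisions are handled by chaining on fncache_hash[].
 */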
unsigned shash(const unsigned char *s)
{
unsigned h = 0;
while (*s)
h = 65599 * h + *s++;
return h ^ (h >> 16);
}
static bool lookup_fncache(const char *name, bool *res)
{
int h = shash((const unsigned char *)name) % FNHSIZE;
struct fncache *n;
hlist_for_each_entry(n, &fncache_hash[h], nd) {
if (!strcmp(n->name, name)) {
*res = n->res;
return true;
}
}
return false;
}
static void update_fncache(const char *name, bool res)
{
struct fncache *n = malloc(sizeof(struct fncache) + strlen(name) + 1);
int h = shash((const unsigned char *)name) % FNHSIZE;
if (!n)
return;
strcpy(n->name, name);
n->res = res;
hlist_add_head(&n->nd, &fncache_hash[h]);
}
/* No LRU, only use when bounded in some other way. */
bool file_available(const char *name)
{
bool res;
if (lookup_fncache(name, &res))
return res;
res = access(name, R_OK) == 0;
update_fncache(name, res);
return res;
}
| linux-master | tools/perf/util/fncache.c |
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "string2.h"
#include "strlist.h"
#include <string.h>
#include <api/fs/fs.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include "asm/bug.h"
#include "thread_map.h"
#include "debug.h"
#include "event.h"
#include <internal/threadmap.h>
/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
if (dir->d_name[0] == '.')
return 0;
else
return 1;
}
#define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)
struct perf_thread_map *thread_map__new_by_pid(pid_t pid)
{
struct perf_thread_map *threads;
char name[256];
int items;
struct dirent **namelist = NULL;
int i;
sprintf(name, "/proc/%d/task", pid);
items = scandir(name, &namelist, filter, NULL);
if (items <= 0)
return NULL;
threads = thread_map__alloc(items);
if (threads != NULL) {
for (i = 0; i < items; i++)
perf_thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
threads->nr = items;
refcount_set(&threads->refcnt, 1);
}
for (i=0; i<items; i++)
zfree(&namelist[i]);
free(namelist);
return threads;
}
struct perf_thread_map *thread_map__new_by_tid(pid_t tid)
{
struct perf_thread_map *threads = thread_map__alloc(1);
if (threads != NULL) {
perf_thread_map__set_pid(threads, 0, tid);
threads->nr = 1;
refcount_set(&threads->refcnt, 1);
}
return threads;
}
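/*
 * Build a thread map covering every task listed in /proc, optionally
 * restricted to processes owned by @uid (UINT_MAX means no uid filter).
 */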
static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid)
{
DIR *proc;
int max_threads = 32, items, i;
char path[NAME_MAX + 1 + 6];
struct dirent *dirent, **namelist = NULL;
struct perf_thread_map *threads = thread_map__alloc(max_threads);
if (threads == NULL)
goto out;
proc = opendir("/proc");
if (proc == NULL)
goto out_free_threads;
threads->nr = 0;
refcount_set(&threads->refcnt, 1);
while ((dirent = readdir(proc)) != NULL) {
char *end;
bool grow = false;
pid_t pid = strtol(dirent->d_name, &end, 10);
if (*end) /* only interested in proper numerical dirents */
continue;
snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);
if (uid != UINT_MAX) {
struct stat st;
if (stat(path, &st) != 0 || st.st_uid != uid)
continue;
}
snprintf(path, sizeof(path), "/proc/%d/task", pid);
items = scandir(path, &namelist, filter, NULL);
if (items <= 0)
goto out_free_closedir;
while (threads->nr + items >= max_threads) {
max_threads *= 2;
grow = true;
}
if (grow) {
struct perf_thread_map *tmp;
tmp = perf_thread_map__realloc(threads, max_threads);
if (tmp == NULL)
goto out_free_namelist;
threads = tmp;
}
for (i = 0; i < items; i++) {
perf_thread_map__set_pid(threads, threads->nr + i,
atoi(namelist[i]->d_name));
}
for (i = 0; i < items; i++)
zfree(&namelist[i]);
free(namelist);
threads->nr += items;
}
out_closedir:
closedir(proc);
out:
return threads;
out_free_threads:
free(threads);
return NULL;
out_free_namelist:
for (i = 0; i < items; i++)
zfree(&namelist[i]);
free(namelist);
out_free_closedir:
zfree(&threads);
goto out_closedir;
}
struct perf_thread_map *thread_map__new_all_cpus(void)
{
return __thread_map__new_all_cpus(UINT_MAX);
}
struct perf_thread_map *thread_map__new_by_uid(uid_t uid)
{
return __thread_map__new_all_cpus(uid);
}
struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
if (pid != -1)
return thread_map__new_by_pid(pid);
if (tid == -1 && uid != UINT_MAX)
return thread_map__new_by_uid(uid);
return thread_map__new_by_tid(tid);
}
static struct perf_thread_map *thread_map__new_by_pid_str(const char *pid_str)
{
struct perf_thread_map *threads = NULL, *nt;
char name[256];
int items, total_tasks = 0;
struct dirent **namelist = NULL;
int i, j = 0;
pid_t pid, prev_pid = INT_MAX;
char *end_ptr;
struct str_node *pos;
struct strlist_config slist_config = { .dont_dupstr = true, };
struct strlist *slist = strlist__new(pid_str, &slist_config);
if (!slist)
return NULL;
strlist__for_each_entry(pos, slist) {
pid = strtol(pos->s, &end_ptr, 10);
if (pid == INT_MIN || pid == INT_MAX ||
(*end_ptr != '\0' && *end_ptr != ','))
goto out_free_threads;
if (pid == prev_pid)
continue;
sprintf(name, "/proc/%d/task", pid);
items = scandir(name, &namelist, filter, NULL);
if (items <= 0)
goto out_free_threads;
total_tasks += items;
nt = perf_thread_map__realloc(threads, total_tasks);
if (nt == NULL)
goto out_free_namelist;
threads = nt;
for (i = 0; i < items; i++) {
perf_thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
zfree(&namelist[i]);
}
threads->nr = total_tasks;
free(namelist);
}
out:
strlist__delete(slist);
if (threads)
refcount_set(&threads->refcnt, 1);
return threads;
out_free_namelist:
for (i = 0; i < items; i++)
zfree(&namelist[i]);
free(namelist);
out_free_threads:
zfree(&threads);
goto out;
}
struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
struct perf_thread_map *threads = NULL, *nt;
int ntasks = 0;
pid_t tid, prev_tid = INT_MAX;
char *end_ptr;
struct str_node *pos;
struct strlist_config slist_config = { .dont_dupstr = true, };
struct strlist *slist;
/* perf-stat expects threads to be generated even if tid not given */
if (!tid_str)
return perf_thread_map__new_dummy();
slist = strlist__new(tid_str, &slist_config);
if (!slist)
return NULL;
strlist__for_each_entry(pos, slist) {
tid = strtol(pos->s, &end_ptr, 10);
if (tid == INT_MIN || tid == INT_MAX ||
(*end_ptr != '\0' && *end_ptr != ','))
goto out_free_threads;
if (tid == prev_tid)
continue;
ntasks++;
nt = perf_thread_map__realloc(threads, ntasks);
if (nt == NULL)
goto out_free_threads;
threads = nt;
perf_thread_map__set_pid(threads, ntasks - 1, tid);
threads->nr = ntasks;
}
out:
if (threads)
refcount_set(&threads->refcnt, 1);
return threads;
out_free_threads:
zfree(&threads);
strlist__delete(slist);
goto out;
}
struct perf_thread_map *thread_map__new_str(const char *pid, const char *tid,
uid_t uid, bool all_threads)
{
if (pid)
return thread_map__new_by_pid_str(pid);
if (!tid && uid != UINT_MAX)
return thread_map__new_by_uid(uid);
if (all_threads)
return thread_map__new_all_cpus();
return thread_map__new_by_tid_str(tid);
}
size_t thread_map__fprintf(struct perf_thread_map *threads, FILE *fp)
{
int i;
size_t printed = fprintf(fp, "%d thread%s: ",
threads->nr, threads->nr > 1 ? "s" : "");
for (i = 0; i < threads->nr; ++i)
printed += fprintf(fp, "%s%d", i ? ", " : "", perf_thread_map__pid(threads, i));
return printed + fprintf(fp, "\n");
}
static int get_comm(char **comm, pid_t pid)
{
char *path;
size_t size;
int err;
if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
return -ENOMEM;
err = filename__read_str(path, comm, &size);
if (!err) {
/*
* We're reading 16 bytes, while filename__read_str
* allocates data per BUFSIZ bytes, so we can safely
* mark the end of the string.
*/
(*comm)[size] = 0;
strim(*comm);
}
free(path);
return err;
}
static void comm_init(struct perf_thread_map *map, int i)
{
pid_t pid = perf_thread_map__pid(map, i);
char *comm = NULL;
/* dummy pid comm initialization */
if (pid == -1) {
map->map[i].comm = strdup("dummy");
return;
}
/*
* The comm name is like extra bonus ;-),
* so just warn if we fail for any reason.
*/
if (get_comm(&comm, pid))
pr_warning("Couldn't resolve comm name for pid %d\n", pid);
map->map[i].comm = comm;
}
void thread_map__read_comms(struct perf_thread_map *threads)
{
int i;
for (i = 0; i < threads->nr; ++i)
comm_init(threads, i);
}
static void thread_map__copy_event(struct perf_thread_map *threads,
struct perf_record_thread_map *event)
{
unsigned i;
threads->nr = (int) event->nr;
for (i = 0; i < event->nr; i++) {
perf_thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
threads->map[i].comm = strndup(event->entries[i].comm, 16);
}
refcount_set(&threads->refcnt, 1);
}
struct perf_thread_map *thread_map__new_event(struct perf_record_thread_map *event)
{
struct perf_thread_map *threads;
threads = thread_map__alloc(event->nr);
if (threads)
thread_map__copy_event(threads, event);
return threads;
}
bool thread_map__has(struct perf_thread_map *threads, pid_t pid)
{
int i;
for (i = 0; i < threads->nr; ++i) {
if (threads->map[i].pid == pid)
return true;
}
return false;
}
int thread_map__remove(struct perf_thread_map *threads, int idx)
{
int i;
if (threads->nr < 1)
return -EINVAL;
if (idx >= threads->nr)
return -EINVAL;
/*
* Free the 'idx' item and shift the rest up.
*/
zfree(&threads->map[idx].comm);
for (i = idx; i < threads->nr - 1; i++)
threads->map[i] = threads->map[i + 1];
threads->nr--;
return 0;
}
| linux-master | tools/perf/util/thread_map.c |
// SPDX-License-Identifier: GPL-2.0
#include <elf.h>
#include <inttypes.h>
#include <stdio.h>
#include "dso.h"
#include "map.h"
#include "symbol.h"
size_t symbol__fprintf(struct symbol *sym, FILE *fp)
{
return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
sym->start, sym->end,
sym->binding == STB_GLOBAL ? 'g' :
sym->binding == STB_LOCAL ? 'l' : 'w',
sym->name);
}
size_t __symbol__fprintf_symname_offs(const struct symbol *sym,
const struct addr_location *al,
bool unknown_as_addr,
bool print_offsets, FILE *fp)
{
unsigned long offset;
size_t length;
if (sym) {
length = fprintf(fp, "%s", sym->name);
if (al && print_offsets) {
if (al->addr < sym->end)
offset = al->addr - sym->start;
else
offset = al->addr - map__start(al->map) - sym->start;
length += fprintf(fp, "+0x%lx", offset);
}
return length;
} else if (al && unknown_as_addr)
return fprintf(fp, "[%#" PRIx64 "]", al->addr);
else
return fprintf(fp, "[unknown]");
}
size_t symbol__fprintf_symname_offs(const struct symbol *sym,
const struct addr_location *al,
FILE *fp)
{
return __symbol__fprintf_symname_offs(sym, al, false, true, fp);
}
size_t __symbol__fprintf_symname(const struct symbol *sym,
const struct addr_location *al,
bool unknown_as_addr, FILE *fp)
{
return __symbol__fprintf_symname_offs(sym, al, unknown_as_addr, false, fp);
}
size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
{
return __symbol__fprintf_symname_offs(sym, NULL, false, false, fp);
}
size_t dso__fprintf_symbols_by_name(struct dso *dso,
FILE *fp)
{
size_t ret = 0;
for (size_t i = 0; i < dso->symbol_names_len; i++) {
struct symbol *pos = dso->symbol_names[i];
ret += fprintf(fp, "%s\n", pos->name);
}
return ret;
}
| linux-master | tools/perf/util/symbol_fprintf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2019
* Author(s): Thomas Richter <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Architecture specific trace_event function. Save event's bc000 raw data
* to file. File name is aux.ctr.## where ## stands for the CPU number the
* sample was taken from.
*/
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include "debug.h"
#include "session.h"
#include "evlist.h"
#include "color.h"
#include "sample-raw.h"
#include "s390-cpumcf-kernel.h"
#include "util/pmu.h"
#include "util/sample.h"
static size_t ctrset_size(struct cf_ctrset_entry *set)
{
return sizeof(*set) + set->ctr * sizeof(u64);
}
static bool ctrset_valid(struct cf_ctrset_entry *set)
{
return set->def == S390_CPUMCF_DIAG_DEF;
}
/* CPU Measurement Counter Facility raw data is a byte stream. It is 8 byte
* aligned and might have trailing padding bytes.
* Display the raw data on screen.
*/
static bool s390_cpumcfdg_testctr(struct perf_sample *sample)
{
size_t len = sample->raw_size, offset = 0;
unsigned char *buf = sample->raw_data;
struct cf_trailer_entry *te;
struct cf_ctrset_entry *cep, ce;
if (!len)
return false;
while (offset < len) {
cep = (struct cf_ctrset_entry *)(buf + offset);
ce.def = be16_to_cpu(cep->def);
ce.set = be16_to_cpu(cep->set);
ce.ctr = be16_to_cpu(cep->ctr);
ce.res1 = be16_to_cpu(cep->res1);
if (!ctrset_valid(&ce) || offset + ctrset_size(&ce) > len) {
			/* Raw data for counter sets is always a multiple of 8
			 * bytes. Prepending a 4-byte size field to the
* raw data block in the sample causes the perf tool
* to append 4 padding bytes to make the raw data part
* of the sample a multiple of eight bytes again.
*
* If the last entry (trailer) is 4 bytes off the raw
* area data end, all is good.
*/
if (len - offset - sizeof(*te) == 4)
break;
pr_err("Invalid counter set entry at %zd\n", offset);
return false;
}
offset += ctrset_size(&ce);
}
return true;
}
/* Dump event bc000 on screen, already tested on correctness. */
static void s390_cpumcfdg_dumptrail(const char *color, size_t offset,
struct cf_trailer_entry *tep)
{
struct cf_trailer_entry te;
te.flags = be64_to_cpu(tep->flags);
te.cfvn = be16_to_cpu(tep->cfvn);
te.csvn = be16_to_cpu(tep->csvn);
te.cpu_speed = be32_to_cpu(tep->cpu_speed);
te.timestamp = be64_to_cpu(tep->timestamp);
te.progusage1 = be64_to_cpu(tep->progusage1);
te.progusage2 = be64_to_cpu(tep->progusage2);
te.progusage3 = be64_to_cpu(tep->progusage3);
te.tod_base = be64_to_cpu(tep->tod_base);
te.mach_type = be16_to_cpu(tep->mach_type);
te.res1 = be16_to_cpu(tep->res1);
te.res2 = be32_to_cpu(tep->res2);
color_fprintf(stdout, color, " [%#08zx] Trailer:%c%c%c%c%c"
" Cfvn:%d Csvn:%d Speed:%d TOD:%#llx\n",
offset, te.clock_base ? 'T' : ' ',
te.speed ? 'S' : ' ', te.mtda ? 'M' : ' ',
te.caca ? 'C' : ' ', te.lcda ? 'L' : ' ',
te.cfvn, te.csvn, te.cpu_speed, te.timestamp);
color_fprintf(stdout, color, "\t\t1:%lx 2:%lx 3:%lx TOD-Base:%#llx"
" Type:%x\n\n",
te.progusage1, te.progusage2, te.progusage3,
te.tod_base, te.mach_type);
}
/* Return starting number of a counter set */
static int get_counterset_start(int setnr)
{
switch (setnr) {
case CPUMF_CTR_SET_BASIC: /* Basic counter set */
return 0;
case CPUMF_CTR_SET_USER: /* Problem state counter set */
return 32;
case CPUMF_CTR_SET_CRYPTO: /* Crypto counter set */
return 64;
case CPUMF_CTR_SET_EXT: /* Extended counter set */
return 128;
case CPUMF_CTR_SET_MT_DIAG: /* Diagnostic counter set */
return 448;
default:
return -1;
}
}
struct get_counter_name_data {
int wanted;
char *result;
};
static int get_counter_name_callback(void *vdata, struct pmu_event_info *info)
{
struct get_counter_name_data *data = vdata;
int rc, event_nr;
const char *event_str;
if (info->str == NULL)
return 0;
event_str = strstr(info->str, "event=");
if (!event_str)
return 0;
rc = sscanf(event_str, "event=%x", &event_nr);
if (rc == 1 && event_nr == data->wanted) {
data->result = strdup(info->name);
return 1; /* Terminate the search. */
}
return 0;
}
/* Scan the PMU and extract the logical name of a counter from the event. Input
 * is the counter set and counter number within the set. Construct the event
* number and use this as key. If they match return the name of this counter.
* If no match is found a NULL pointer is returned.
*/
static char *get_counter_name(int set, int nr, struct perf_pmu *pmu)
{
struct get_counter_name_data data = {
.wanted = get_counterset_start(set) + nr,
.result = NULL,
};
if (!pmu)
return NULL;
perf_pmu__for_each_event(pmu, /*skip_duplicate_pmus=*/ true,
&data, get_counter_name_callback);
return data.result;
}
static void s390_cpumcfdg_dump(struct perf_pmu *pmu, struct perf_sample *sample)
{
size_t i, len = sample->raw_size, offset = 0;
unsigned char *buf = sample->raw_data;
const char *color = PERF_COLOR_BLUE;
struct cf_ctrset_entry *cep, ce;
u64 *p;
while (offset < len) {
cep = (struct cf_ctrset_entry *)(buf + offset);
ce.def = be16_to_cpu(cep->def);
ce.set = be16_to_cpu(cep->set);
ce.ctr = be16_to_cpu(cep->ctr);
ce.res1 = be16_to_cpu(cep->res1);
if (!ctrset_valid(&ce)) { /* Print trailer */
s390_cpumcfdg_dumptrail(color, offset,
(struct cf_trailer_entry *)cep);
return;
}
color_fprintf(stdout, color, " [%#08zx] Counterset:%d"
" Counters:%d\n", offset, ce.set, ce.ctr);
for (i = 0, p = (u64 *)(cep + 1); i < ce.ctr; ++i, ++p) {
char *ev_name = get_counter_name(ce.set, i, pmu);
color_fprintf(stdout, color,
"\tCounter:%03d %s Value:%#018lx\n", i,
ev_name ?: "<unknown>", be64_to_cpu(*p));
free(ev_name);
}
offset += ctrset_size(&ce);
}
}
/* S390 specific trace event function. Check for PERF_RECORD_SAMPLE events
* and if the event was triggered by a counter set diagnostic event display
* its raw data.
* The function is only invoked when the dump flag -D is set.
*/
void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
struct evsel *evsel;
if (event->header.type != PERF_RECORD_SAMPLE)
return;
evsel = evlist__event2evsel(evlist, event);
if (evsel == NULL ||
evsel->core.attr.config != PERF_EVENT_CPUM_CF_DIAG)
return;
/* Display raw data on screen */
if (!s390_cpumcfdg_testctr(sample)) {
pr_err("Invalid counter set data encountered\n");
return;
}
s390_cpumcfdg_dump(evsel->pmu, sample);
}
| linux-master | tools/perf/util/s390-sample-raw.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <linux/compiler.h>
#include <linux/perf_event.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include "event.h"
#include "synthetic-events.h"
#include "debug.h"
#include "tsc.h"
u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc)
{
u64 t, quot, rem;
t = ns - tc->time_zero;
quot = t / tc->time_mult;
rem = t % tc->time_mult;
return (quot << tc->time_shift) +
(rem << tc->time_shift) / tc->time_mult;
}
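/*
 * Convert a cycle count to perf time. With cap_user_time_short only the low
 * time_mask bits of the counter are meaningful, so first reconstruct the
 * full value relative to time_cycles, then apply time_mult/time_shift.
 */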
u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
{
u64 quot, rem;
if (tc->cap_user_time_short)
cyc = tc->time_cycles +
((cyc - tc->time_cycles) & tc->time_mask);
quot = cyc >> tc->time_shift;
rem = cyc & (((u64)1 << tc->time_shift) - 1);
return tc->time_zero + quot * tc->time_mult +
((rem * tc->time_mult) >> tc->time_shift);
}
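/*
 * Seqcount-style read of the conversion parameters from the mmapped event
 * page: retry while the sequence is odd (writer active) or changed during
 * the read, giving up after 10000 attempts.
 */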
int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
struct perf_tsc_conversion *tc)
{
u32 seq;
int i = 0;
while (1) {
seq = pc->lock;
rmb();
tc->time_mult = pc->time_mult;
tc->time_shift = pc->time_shift;
tc->time_zero = pc->time_zero;
tc->time_cycles = pc->time_cycles;
tc->time_mask = pc->time_mask;
tc->cap_user_time_zero = pc->cap_user_time_zero;
tc->cap_user_time_short = pc->cap_user_time_short;
rmb();
if (pc->lock == seq && !(seq & 1))
break;
if (++i > 10000) {
pr_debug("failed to get perf_event_mmap_page lock\n");
return -EINVAL;
}
}
if (!tc->cap_user_time_zero)
return -EOPNOTSUPP;
return 0;
}
int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
union perf_event event = {
.time_conv = {
.header = {
.type = PERF_RECORD_TIME_CONV,
.size = sizeof(struct perf_record_time_conv),
},
},
};
struct perf_tsc_conversion tc;
int err;
if (!pc)
return 0;
err = perf_read_tsc_conversion(pc, &tc);
if (err == -EOPNOTSUPP)
return 0;
if (err)
return err;
pr_debug2("Synthesizing TSC conversion information\n");
event.time_conv.time_mult = tc.time_mult;
event.time_conv.time_shift = tc.time_shift;
event.time_conv.time_zero = tc.time_zero;
event.time_conv.time_cycles = tc.time_cycles;
event.time_conv.time_mask = tc.time_mask;
event.time_conv.cap_user_time_zero = tc.cap_user_time_zero;
event.time_conv.cap_user_time_short = tc.cap_user_time_short;
return process(tool, &event, NULL, machine);
}
u64 __weak rdtsc(void)
{
return 0;
}
size_t perf_event__fprintf_time_conv(union perf_event *event, FILE *fp)
{
struct perf_record_time_conv *tc = (struct perf_record_time_conv *)event;
size_t ret;
ret = fprintf(fp, "\n... Time Shift %" PRI_lu64 "\n", tc->time_shift);
ret += fprintf(fp, "... Time Muliplier %" PRI_lu64 "\n", tc->time_mult);
ret += fprintf(fp, "... Time Zero %" PRI_lu64 "\n", tc->time_zero);
/*
	 * The TIME_CONV event was extended with the fields starting at
	 * "time_cycles" when cap_user_time_short became supported. For
	 * backward compatibility, print the extended fields only if they are
	 * contained in the event.
*/
if (event_contains(*tc, time_cycles)) {
ret += fprintf(fp, "... Time Cycles %" PRI_lu64 "\n",
tc->time_cycles);
ret += fprintf(fp, "... Time Mask %#" PRI_lx64 "\n",
tc->time_mask);
ret += fprintf(fp, "... Cap Time Zero %" PRId32 "\n",
tc->cap_user_time_zero);
ret += fprintf(fp, "... Cap Time Short %" PRId32 "\n",
tc->cap_user_time_short);
}
return ret;
}
| linux-master | tools/perf/util/tsc.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <subcmd/pager.h>
#include <string.h>
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include "color.h"
#include <math.h>
#include <unistd.h>
int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
{
if (value) {
if (!strcasecmp(value, "never"))
return 0;
if (!strcasecmp(value, "always"))
return 1;
if (!strcasecmp(value, "auto"))
goto auto_color;
}
/* Missing or explicit false to turn off colorization */
if (!perf_config_bool(var, value))
return 0;
/* any normal truth value defaults to 'auto' */
auto_color:
if (stdout_is_tty < 0)
stdout_is_tty = isatty(1);
if (stdout_is_tty || pager_in_use()) {
char *term = getenv("TERM");
if (term && strcmp(term, "dumb"))
return 1;
}
return 0;
}
int perf_color_default_config(const char *var, const char *value,
void *cb __maybe_unused)
{
if (!strcmp(var, "color.ui")) {
perf_use_color_default = perf_config_colorbool(var, value, -1);
return 0;
}
return 0;
}
| linux-master | tools/perf/util/color_config.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on strlist.c by:
* (c) 2009 Arnaldo Carvalho de Melo <[email protected]>
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "rblist.h"
int rblist__add_node(struct rblist *rblist, const void *new_entry)
{
struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node;
bool leftmost = true;
while (*p != NULL) {
int rc;
parent = *p;
rc = rblist->node_cmp(parent, new_entry);
if (rc > 0)
p = &(*p)->rb_left;
else if (rc < 0) {
p = &(*p)->rb_right;
leftmost = false;
}
else
return -EEXIST;
}
new_node = rblist->node_new(rblist, new_entry);
if (new_node == NULL)
return -ENOMEM;
rb_link_node(new_node, parent, p);
rb_insert_color_cached(new_node, &rblist->entries, leftmost);
++rblist->nr_entries;
return 0;
}
void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
{
rb_erase_cached(rb_node, &rblist->entries);
--rblist->nr_entries;
rblist->node_delete(rblist, rb_node);
}
static struct rb_node *__rblist__findnew(struct rblist *rblist,
const void *entry,
bool create)
{
struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node = NULL;
bool leftmost = true;
while (*p != NULL) {
int rc;
parent = *p;
rc = rblist->node_cmp(parent, entry);
if (rc > 0)
p = &(*p)->rb_left;
else if (rc < 0) {
p = &(*p)->rb_right;
leftmost = false;
}
else
return parent;
}
if (create) {
new_node = rblist->node_new(rblist, entry);
if (new_node) {
rb_link_node(new_node, parent, p);
rb_insert_color_cached(new_node,
&rblist->entries, leftmost);
++rblist->nr_entries;
}
}
return new_node;
}
struct rb_node *rblist__find(struct rblist *rblist, const void *entry)
{
return __rblist__findnew(rblist, entry, false);
}
struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry)
{
return __rblist__findnew(rblist, entry, true);
}
void rblist__init(struct rblist *rblist)
{
if (rblist != NULL) {
rblist->entries = RB_ROOT_CACHED;
rblist->nr_entries = 0;
}
return;
}
void rblist__exit(struct rblist *rblist)
{
struct rb_node *pos, *next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
next = rb_next(pos);
rblist__remove_node(rblist, pos);
}
}
void rblist__delete(struct rblist *rblist)
{
if (rblist != NULL) {
rblist__exit(rblist);
free(rblist);
}
}
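/* Return the idx-th node in tree order, or NULL; this is a linear walk. */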
struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
{
struct rb_node *node;
for (node = rb_first_cached(&rblist->entries); node;
node = rb_next(node)) {
if (!idx--)
return node;
}
return NULL;
}
| linux-master | tools/perf/util/rblist.c |
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <math.h>
#include <perf/cpumap.h>
#include "color.h"
#include "counts.h"
#include "evlist.h"
#include "evsel.h"
#include "stat.h"
#include "top.h"
#include "thread_map.h"
#include "cpumap.h"
#include "string2.h"
#include <linux/ctype.h>
#include "cgroup.h"
#include <api/fs/fs.h>
#include "util.h"
#include "iostat.h"
#include "pmu.h"
#include "pmus.h"
#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED "<not counted>"
#define MGROUP_LEN 50
#define METRIC_LEN 38
#define EVNAME_LEN 32
#define COUNTS_LEN 18
#define INTERVAL_LEN 16
#define CGROUP_LEN 16
#define COMM_LEN 16
#define PID_LEN 7
#define CPUS_LEN 4
static int aggr_header_lens[] = {
[AGGR_CORE] = 18,
[AGGR_CACHE] = 22,
[AGGR_DIE] = 12,
[AGGR_SOCKET] = 6,
[AGGR_NODE] = 6,
[AGGR_NONE] = 6,
[AGGR_THREAD] = 16,
[AGGR_GLOBAL] = 0,
};
static const char *aggr_header_csv[] = {
[AGGR_CORE] = "core,cpus,",
[AGGR_CACHE] = "cache,cpus,",
[AGGR_DIE] = "die,cpus,",
[AGGR_SOCKET] = "socket,cpus,",
[AGGR_NONE] = "cpu,",
[AGGR_THREAD] = "comm-pid,",
[AGGR_NODE] = "node,",
[AGGR_GLOBAL] = ""
};
static const char *aggr_header_std[] = {
[AGGR_CORE] = "core",
[AGGR_CACHE] = "cache",
[AGGR_DIE] = "die",
[AGGR_SOCKET] = "socket",
[AGGR_NONE] = "cpu",
[AGGR_THREAD] = "comm-pid",
[AGGR_NODE] = "node",
[AGGR_GLOBAL] = ""
};
static void print_running_std(struct perf_stat_config *config, u64 run, u64 ena)
{
if (run != ena)
fprintf(config->output, " (%.2f%%)", 100.0 * run / ena);
}
static void print_running_csv(struct perf_stat_config *config, u64 run, u64 ena)
{
double enabled_percent = 100;
if (run != ena)
enabled_percent = 100 * run / ena;
fprintf(config->output, "%s%" PRIu64 "%s%.2f",
config->csv_sep, run, config->csv_sep, enabled_percent);
}
static void print_running_json(struct perf_stat_config *config, u64 run, u64 ena)
{
double enabled_percent = 100;
if (run != ena)
enabled_percent = 100 * run / ena;
fprintf(config->output, "\"event-runtime\" : %" PRIu64 ", \"pcnt-running\" : %.2f, ",
run, enabled_percent);
}
static void print_running(struct perf_stat_config *config,
u64 run, u64 ena, bool before_metric)
{
if (config->json_output) {
if (before_metric)
print_running_json(config, run, ena);
} else if (config->csv_output) {
if (before_metric)
print_running_csv(config, run, ena);
} else {
if (!before_metric)
print_running_std(config, run, ena);
}
}
static void print_noise_pct_std(struct perf_stat_config *config,
double pct)
{
if (pct)
fprintf(config->output, " ( +-%6.2f%% )", pct);
}
static void print_noise_pct_csv(struct perf_stat_config *config,
double pct)
{
fprintf(config->output, "%s%.2f%%", config->csv_sep, pct);
}
static void print_noise_pct_json(struct perf_stat_config *config,
double pct)
{
fprintf(config->output, "\"variance\" : %.2f, ", pct);
}
static void print_noise_pct(struct perf_stat_config *config,
double total, double avg, bool before_metric)
{
double pct = rel_stddev_stats(total, avg);
if (config->json_output) {
if (before_metric)
print_noise_pct_json(config, pct);
} else if (config->csv_output) {
if (before_metric)
print_noise_pct_csv(config, pct);
} else {
if (!before_metric)
print_noise_pct_std(config, pct);
}
}
static void print_noise(struct perf_stat_config *config,
struct evsel *evsel, double avg, bool before_metric)
{
struct perf_stat_evsel *ps;
if (config->run_count == 1)
return;
ps = evsel->stats;
print_noise_pct(config, stddev_stats(&ps->res_stats), avg, before_metric);
}
static void print_cgroup_std(struct perf_stat_config *config, const char *cgrp_name)
{
fprintf(config->output, " %-*s", CGROUP_LEN, cgrp_name);
}
static void print_cgroup_csv(struct perf_stat_config *config, const char *cgrp_name)
{
fprintf(config->output, "%s%s", config->csv_sep, cgrp_name);
}
static void print_cgroup_json(struct perf_stat_config *config, const char *cgrp_name)
{
fprintf(config->output, "\"cgroup\" : \"%s\", ", cgrp_name);
}
static void print_cgroup(struct perf_stat_config *config, struct cgroup *cgrp)
{
if (nr_cgroups || config->cgroup_list) {
const char *cgrp_name = cgrp ? cgrp->name : "";
if (config->json_output)
print_cgroup_json(config, cgrp_name);
else if (config->csv_output)
print_cgroup_csv(config, cgrp_name);
else
print_cgroup_std(config, cgrp_name);
}
}
static void print_aggr_id_std(struct perf_stat_config *config,
struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
{
FILE *output = config->output;
int idx = config->aggr_mode;
char buf[128];
switch (config->aggr_mode) {
case AGGR_CORE:
snprintf(buf, sizeof(buf), "S%d-D%d-C%d", id.socket, id.die, id.core);
break;
case AGGR_CACHE:
snprintf(buf, sizeof(buf), "S%d-D%d-L%d-ID%d",
id.socket, id.die, id.cache_lvl, id.cache);
break;
case AGGR_DIE:
snprintf(buf, sizeof(buf), "S%d-D%d", id.socket, id.die);
break;
case AGGR_SOCKET:
snprintf(buf, sizeof(buf), "S%d", id.socket);
break;
case AGGR_NODE:
snprintf(buf, sizeof(buf), "N%d", id.node);
break;
case AGGR_NONE:
if (evsel->percore && !config->percore_show_thread) {
snprintf(buf, sizeof(buf), "S%d-D%d-C%d ",
id.socket, id.die, id.core);
fprintf(output, "%-*s ",
aggr_header_lens[AGGR_CORE], buf);
} else if (id.cpu.cpu > -1) {
fprintf(output, "CPU%-*d ",
aggr_header_lens[AGGR_NONE] - 3, id.cpu.cpu);
}
return;
case AGGR_THREAD:
fprintf(output, "%*s-%-*d ",
COMM_LEN, perf_thread_map__comm(evsel->core.threads, id.thread_idx),
PID_LEN, perf_thread_map__pid(evsel->core.threads, id.thread_idx));
return;
case AGGR_GLOBAL:
case AGGR_UNSET:
case AGGR_MAX:
default:
return;
}
fprintf(output, "%-*s %*d ", aggr_header_lens[idx], buf, 4, aggr_nr);
}
static void print_aggr_id_csv(struct perf_stat_config *config,
struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
{
FILE *output = config->output;
const char *sep = config->csv_sep;
switch (config->aggr_mode) {
case AGGR_CORE:
fprintf(output, "S%d-D%d-C%d%s%d%s",
id.socket, id.die, id.core, sep, aggr_nr, sep);
break;
case AGGR_CACHE:
fprintf(config->output, "S%d-D%d-L%d-ID%d%s%d%s",
id.socket, id.die, id.cache_lvl, id.cache, sep, aggr_nr, sep);
break;
case AGGR_DIE:
fprintf(output, "S%d-D%d%s%d%s",
id.socket, id.die, sep, aggr_nr, sep);
break;
case AGGR_SOCKET:
fprintf(output, "S%d%s%d%s",
id.socket, sep, aggr_nr, sep);
break;
case AGGR_NODE:
fprintf(output, "N%d%s%d%s",
id.node, sep, aggr_nr, sep);
break;
case AGGR_NONE:
if (evsel->percore && !config->percore_show_thread) {
fprintf(output, "S%d-D%d-C%d%s",
id.socket, id.die, id.core, sep);
} else if (id.cpu.cpu > -1) {
fprintf(output, "CPU%d%s",
id.cpu.cpu, sep);
}
break;
case AGGR_THREAD:
fprintf(output, "%s-%d%s",
perf_thread_map__comm(evsel->core.threads, id.thread_idx),
perf_thread_map__pid(evsel->core.threads, id.thread_idx),
sep);
break;
case AGGR_GLOBAL:
case AGGR_UNSET:
case AGGR_MAX:
default:
break;
}
}
static void print_aggr_id_json(struct perf_stat_config *config,
struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
{
FILE *output = config->output;
switch (config->aggr_mode) {
case AGGR_CORE:
fprintf(output, "\"core\" : \"S%d-D%d-C%d\", \"aggregate-number\" : %d, ",
id.socket, id.die, id.core, aggr_nr);
break;
case AGGR_CACHE:
fprintf(output, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"aggregate-number\" : %d, ",
id.socket, id.die, id.cache_lvl, id.cache, aggr_nr);
break;
case AGGR_DIE:
fprintf(output, "\"die\" : \"S%d-D%d\", \"aggregate-number\" : %d, ",
id.socket, id.die, aggr_nr);
break;
case AGGR_SOCKET:
fprintf(output, "\"socket\" : \"S%d\", \"aggregate-number\" : %d, ",
id.socket, aggr_nr);
break;
case AGGR_NODE:
fprintf(output, "\"node\" : \"N%d\", \"aggregate-number\" : %d, ",
id.node, aggr_nr);
break;
case AGGR_NONE:
if (evsel->percore && !config->percore_show_thread) {
fprintf(output, "\"core\" : \"S%d-D%d-C%d\"",
id.socket, id.die, id.core);
} else if (id.cpu.cpu > -1) {
fprintf(output, "\"cpu\" : \"%d\", ",
id.cpu.cpu);
}
break;
case AGGR_THREAD:
fprintf(output, "\"thread\" : \"%s-%d\", ",
perf_thread_map__comm(evsel->core.threads, id.thread_idx),
perf_thread_map__pid(evsel->core.threads, id.thread_idx));
break;
case AGGR_GLOBAL:
case AGGR_UNSET:
case AGGR_MAX:
default:
break;
}
}
static void aggr_printout(struct perf_stat_config *config,
struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
{
if (config->json_output)
print_aggr_id_json(config, evsel, id, aggr_nr);
else if (config->csv_output)
print_aggr_id_csv(config, evsel, id, aggr_nr);
else
print_aggr_id_std(config, evsel, id, aggr_nr);
}
struct outstate {
FILE *fh;
bool newline;
bool first;
const char *prefix;
int nfields;
int aggr_nr;
struct aggr_cpu_id id;
struct evsel *evsel;
struct cgroup *cgrp;
};
static void new_line_std(struct perf_stat_config *config __maybe_unused,
void *ctx)
{
struct outstate *os = ctx;
os->newline = true;
}
static inline void __new_line_std_csv(struct perf_stat_config *config,
struct outstate *os)
{
fputc('\n', os->fh);
if (os->prefix)
fputs(os->prefix, os->fh);
aggr_printout(config, os->evsel, os->id, os->aggr_nr);
}
static inline void __new_line_std(struct outstate *os)
{
fprintf(os->fh, " ");
}
static void do_new_line_std(struct perf_stat_config *config,
struct outstate *os)
{
__new_line_std_csv(config, os);
if (config->aggr_mode == AGGR_NONE)
fprintf(os->fh, " ");
__new_line_std(os);
}
static void print_metric_std(struct perf_stat_config *config,
void *ctx, const char *color, const char *fmt,
const char *unit, double val)
{
struct outstate *os = ctx;
FILE *out = os->fh;
int n;
bool newline = os->newline;
os->newline = false;
if (unit == NULL || fmt == NULL) {
fprintf(out, "%-*s", METRIC_LEN, "");
return;
}
if (newline)
do_new_line_std(config, os);
n = fprintf(out, " # ");
if (color)
n += color_fprintf(out, color, fmt, val);
else
n += fprintf(out, fmt, val);
fprintf(out, " %-*s", METRIC_LEN - n - 1, unit);
}
static void new_line_csv(struct perf_stat_config *config, void *ctx)
{
struct outstate *os = ctx;
int i;
__new_line_std_csv(config, os);
for (i = 0; i < os->nfields; i++)
fputs(config->csv_sep, os->fh);
}
static void print_metric_csv(struct perf_stat_config *config __maybe_unused,
void *ctx,
const char *color __maybe_unused,
const char *fmt, const char *unit, double val)
{
struct outstate *os = ctx;
FILE *out = os->fh;
char buf[64], *vals, *ends;
if (unit == NULL || fmt == NULL) {
fprintf(out, "%s%s", config->csv_sep, config->csv_sep);
return;
}
snprintf(buf, sizeof(buf), fmt, val);
ends = vals = skip_spaces(buf);
while (isdigit(*ends) || *ends == '.')
ends++;
*ends = 0;
fprintf(out, "%s%s%s%s", config->csv_sep, vals, config->csv_sep, skip_spaces(unit));
}
static void print_metric_json(struct perf_stat_config *config __maybe_unused,
void *ctx,
const char *color __maybe_unused,
const char *fmt __maybe_unused,
const char *unit, double val)
{
struct outstate *os = ctx;
FILE *out = os->fh;
fprintf(out, "\"metric-value\" : \"%f\", ", val);
fprintf(out, "\"metric-unit\" : \"%s\"", unit);
if (!config->metric_only)
fprintf(out, "}");
}
static void new_line_json(struct perf_stat_config *config, void *ctx)
{
struct outstate *os = ctx;
fputs("\n{", os->fh);
if (os->prefix)
fprintf(os->fh, "%s", os->prefix);
aggr_printout(config, os->evsel, os->id, os->aggr_nr);
}
static void print_metricgroup_header_json(struct perf_stat_config *config,
void *ctx,
const char *metricgroup_name)
{
if (!metricgroup_name)
return;
fprintf(config->output, "\"metricgroup\" : \"%s\"}", metricgroup_name);
new_line_json(config, ctx);
}
static void print_metricgroup_header_csv(struct perf_stat_config *config,
void *ctx,
const char *metricgroup_name)
{
struct outstate *os = ctx;
int i;
if (!metricgroup_name) {
/* Leave space for running and enabling */
for (i = 0; i < os->nfields - 2; i++)
fputs(config->csv_sep, os->fh);
return;
}
for (i = 0; i < os->nfields; i++)
fputs(config->csv_sep, os->fh);
fprintf(config->output, "%s", metricgroup_name);
new_line_csv(config, ctx);
}
static void print_metricgroup_header_std(struct perf_stat_config *config,
void *ctx,
const char *metricgroup_name)
{
struct outstate *os = ctx;
int n;
if (!metricgroup_name) {
__new_line_std(os);
return;
}
n = fprintf(config->output, " %*s", EVNAME_LEN, metricgroup_name);
fprintf(config->output, "%*s", MGROUP_LEN - n - 1, "");
}
/* Filter out some columns that don't work well in metrics only mode */
static bool valid_only_metric(const char *unit)
{
if (!unit)
return false;
if (strstr(unit, "/sec") ||
strstr(unit, "CPUs utilized"))
return false;
return true;
}
static const char *fixunit(char *buf, struct evsel *evsel,
const char *unit)
{
if (!strncmp(unit, "of all", 6)) {
snprintf(buf, 1024, "%s %s", evsel__name(evsel),
unit);
return buf;
}
return unit;
}
static void print_metric_only(struct perf_stat_config *config,
void *ctx, const char *color, const char *fmt,
const char *unit, double val)
{
struct outstate *os = ctx;
FILE *out = os->fh;
char buf[1024], str[1024];
unsigned mlen = config->metric_only_len;
if (!valid_only_metric(unit))
return;
unit = fixunit(buf, os->evsel, unit);
if (mlen < strlen(unit))
mlen = strlen(unit) + 1;
if (color)
mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
color_snprintf(str, sizeof(str), color ?: "", fmt, val);
fprintf(out, "%*s ", mlen, str);
os->first = false;
}
static void print_metric_only_csv(struct perf_stat_config *config __maybe_unused,
void *ctx, const char *color __maybe_unused,
const char *fmt,
const char *unit, double val)
{
struct outstate *os = ctx;
FILE *out = os->fh;
char buf[64], *vals, *ends;
char tbuf[1024];
if (!valid_only_metric(unit))
return;
unit = fixunit(tbuf, os->evsel, unit);
snprintf(buf, sizeof(buf), fmt ?: "", val);
ends = vals = skip_spaces(buf);
while (isdigit(*ends) || *ends == '.')
ends++;
*ends = 0;
fprintf(out, "%s%s", vals, config->csv_sep);
os->first = false;
}
static void print_metric_only_json(struct perf_stat_config *config __maybe_unused,
void *ctx, const char *color __maybe_unused,
const char *fmt,
const char *unit, double val)
{
struct outstate *os = ctx;
FILE *out = os->fh;
char buf[64], *vals, *ends;
char tbuf[1024];
if (!valid_only_metric(unit))
return;
unit = fixunit(tbuf, os->evsel, unit);
snprintf(buf, sizeof(buf), fmt ?: "", val);
ends = vals = skip_spaces(buf);
while (isdigit(*ends) || *ends == '.')
ends++;
*ends = 0;
if (!unit[0] || !vals[0])
return;
fprintf(out, "%s\"%s\" : \"%s\"", os->first ? "" : ", ", unit, vals);
os->first = false;
}
static void new_line_metric(struct perf_stat_config *config __maybe_unused,
void *ctx __maybe_unused)
{
}
static void print_metric_header(struct perf_stat_config *config,
void *ctx, const char *color __maybe_unused,
const char *fmt __maybe_unused,
const char *unit, double val __maybe_unused)
{
struct outstate *os = ctx;
char tbuf[1024];
/* In case of iostat, print metric header for first root port only */
if (config->iostat_run &&
os->evsel->priv != os->evsel->evlist->selected->priv)
return;
if (os->evsel->cgrp != os->cgrp)
return;
if (!valid_only_metric(unit))
return;
unit = fixunit(tbuf, os->evsel, unit);
if (config->json_output)
return;
else if (config->csv_output)
fprintf(os->fh, "%s%s", unit, config->csv_sep);
else
fprintf(os->fh, "%*s ", config->metric_only_len, unit);
}
static void print_counter_value_std(struct perf_stat_config *config,
struct evsel *evsel, double avg, bool ok)
{
FILE *output = config->output;
double sc = evsel->scale;
const char *fmt;
const char *bad_count = evsel->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED;
if (config->big_num)
fmt = floor(sc) != sc ? "%'*.2f " : "%'*.0f ";
else
fmt = floor(sc) != sc ? "%*.2f " : "%*.0f ";
if (ok)
fprintf(output, fmt, COUNTS_LEN, avg);
else
fprintf(output, "%*s ", COUNTS_LEN, bad_count);
if (evsel->unit)
fprintf(output, "%-*s ", config->unit_width, evsel->unit);
fprintf(output, "%-*s", EVNAME_LEN, evsel__name(evsel));
}
static void print_counter_value_csv(struct perf_stat_config *config,
struct evsel *evsel, double avg, bool ok)
{
FILE *output = config->output;
double sc = evsel->scale;
const char *sep = config->csv_sep;
const char *fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
const char *bad_count = evsel->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED;
if (ok)
fprintf(output, fmt, avg, sep);
else
fprintf(output, "%s%s", bad_count, sep);
if (evsel->unit)
fprintf(output, "%s%s", evsel->unit, sep);
fprintf(output, "%s", evsel__name(evsel));
}
static void print_counter_value_json(struct perf_stat_config *config,
struct evsel *evsel, double avg, bool ok)
{
FILE *output = config->output;
const char *bad_count = evsel->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED;
if (ok)
fprintf(output, "\"counter-value\" : \"%f\", ", avg);
else
fprintf(output, "\"counter-value\" : \"%s\", ", bad_count);
if (evsel->unit)
fprintf(output, "\"unit\" : \"%s\", ", evsel->unit);
fprintf(output, "\"event\" : \"%s\", ", evsel__name(evsel));
}
static void print_counter_value(struct perf_stat_config *config,
struct evsel *evsel, double avg, bool ok)
{
if (config->json_output)
print_counter_value_json(config, evsel, avg, ok);
else if (config->csv_output)
print_counter_value_csv(config, evsel, avg, ok);
else
print_counter_value_std(config, evsel, avg, ok);
}
static void abs_printout(struct perf_stat_config *config,
struct aggr_cpu_id id, int aggr_nr,
struct evsel *evsel, double avg, bool ok)
{
aggr_printout(config, evsel, id, aggr_nr);
print_counter_value(config, evsel, avg, ok);
print_cgroup(config, evsel->cgrp);
}
static bool is_mixed_hw_group(struct evsel *counter)
{
struct evlist *evlist = counter->evlist;
u32 pmu_type = counter->core.attr.type;
struct evsel *pos;
if (counter->core.nr_members < 2)
return false;
evlist__for_each_entry(evlist, pos) {
/* software events can be part of any hardware group */
if (pos->core.attr.type == PERF_TYPE_SOFTWARE)
continue;
if (pmu_type == PERF_TYPE_SOFTWARE) {
pmu_type = pos->core.attr.type;
continue;
}
if (pmu_type != pos->core.attr.type)
return true;
}
return false;
}
static bool evlist__has_hybrid(struct evlist *evlist)
{
struct evsel *evsel;
if (perf_pmus__num_core_pmus() == 1)
return false;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.is_pmu_core)
return true;
}
return false;
}
static void printout(struct perf_stat_config *config, struct outstate *os,
double uval, u64 run, u64 ena, double noise, int aggr_idx)
{
struct perf_stat_output_ctx out;
print_metric_t pm;
new_line_t nl;
print_metricgroup_header_t pmh;
bool ok = true;
struct evsel *counter = os->evsel;
if (config->csv_output) {
pm = config->metric_only ? print_metric_only_csv : print_metric_csv;
nl = config->metric_only ? new_line_metric : new_line_csv;
pmh = print_metricgroup_header_csv;
os->nfields = 4 + (counter->cgrp ? 1 : 0);
} else if (config->json_output) {
pm = config->metric_only ? print_metric_only_json : print_metric_json;
nl = config->metric_only ? new_line_metric : new_line_json;
pmh = print_metricgroup_header_json;
} else {
pm = config->metric_only ? print_metric_only : print_metric_std;
nl = config->metric_only ? new_line_metric : new_line_std;
pmh = print_metricgroup_header_std;
}
if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
if (config->metric_only) {
pm(config, os, NULL, "", "", 0);
return;
}
ok = false;
if (counter->supported) {
if (!evlist__has_hybrid(counter->evlist)) {
config->print_free_counters_hint = 1;
if (is_mixed_hw_group(counter))
config->print_mixed_hw_group_error = 1;
}
}
}
out.print_metric = pm;
out.new_line = nl;
out.print_metricgroup_header = pmh;
out.ctx = os;
out.force_header = false;
if (!config->metric_only && !counter->default_metricgroup) {
abs_printout(config, os->id, os->aggr_nr, counter, uval, ok);
print_noise(config, counter, noise, /*before_metric=*/true);
print_running(config, run, ena, /*before_metric=*/true);
}
if (ok) {
if (!config->metric_only && counter->default_metricgroup) {
void *from = NULL;
aggr_printout(config, os->evsel, os->id, os->aggr_nr);
/* Print out all the metricgroup with the same metric event. */
do {
int num = 0;
/* Print out the new line for the next new metricgroup. */
if (from) {
if (config->json_output)
new_line_json(config, (void *)os);
else
__new_line_std_csv(config, os);
}
print_noise(config, counter, noise, /*before_metric=*/true);
print_running(config, run, ena, /*before_metric=*/true);
from = perf_stat__print_shadow_stats_metricgroup(config, counter, aggr_idx,
&num, from, &out,
&config->metric_events);
} while (from != NULL);
} else
perf_stat__print_shadow_stats(config, counter, uval, aggr_idx,
&out, &config->metric_events);
} else {
pm(config, os, /*color=*/NULL, /*format=*/NULL, /*unit=*/"", /*val=*/0);
}
if (!config->metric_only) {
print_noise(config, counter, noise, /*before_metric=*/false);
print_running(config, run, ena, /*before_metric=*/false);
}
}
static void uniquify_event_name(struct evsel *counter)
{
char *new_name;
char *config;
int ret = 0;
if (counter->uniquified_name || counter->use_config_name ||
!counter->pmu_name || !strncmp(evsel__name(counter), counter->pmu_name,
strlen(counter->pmu_name)))
return;
config = strchr(counter->name, '/');
if (config) {
if (asprintf(&new_name,
"%s%s", counter->pmu_name, config) > 0) {
free(counter->name);
counter->name = new_name;
}
} else {
if (evsel__is_hybrid(counter)) {
ret = asprintf(&new_name, "%s/%s/",
counter->pmu_name, counter->name);
} else {
ret = asprintf(&new_name, "%s [%s]",
counter->name, counter->pmu_name);
}
if (ret > 0) {
free(counter->name);
counter->name = new_name;
}
}
counter->uniquified_name = true;
}
static bool hybrid_uniquify(struct evsel *evsel, struct perf_stat_config *config)
{
return evsel__is_hybrid(evsel) && !config->hybrid_merge;
}
static void uniquify_counter(struct perf_stat_config *config, struct evsel *counter)
{
if (config->no_merge || hybrid_uniquify(counter, config))
uniquify_event_name(counter);
}
/**
* should_skip_zero_counter() - Check whether a zero count should be skipped for the event.
* @config: The perf stat configuration (including aggregation mode).
* @counter: The evsel with its associated cpumap.
* @id: The aggregation id that is being queried.
*
* Due to a mismatch between the event's cpumap or thread map and the
* aggregation mode, the printing code can iterate over map entries
* that do not contain any values.
*
* For example, uncore events have dedicated CPUs to manage them, so the
* result for all other CPUs should be zero and skipped.
*
* Return: %true if the value should NOT be printed, %false if the value
* needs to be printed like "<not counted>" or "<not supported>".
*/
static bool should_skip_zero_counter(struct perf_stat_config *config,
struct evsel *counter,
const struct aggr_cpu_id *id)
{
struct perf_cpu cpu;
int idx;
/*
* Skip value 0 when enabling --per-thread globally,
* otherwise it would produce too much zero-valued output.
*/
if (config->aggr_mode == AGGR_THREAD && config->system_wide)
return true;
/* Tool events have the software PMU but are only gathered on one CPU. */
if (evsel__is_tool(counter))
return true;
/*
* Skip value 0 when it's an uncore event and the given aggr id
* does not belong to the PMU cpumask.
*/
if (!counter->pmu || !counter->pmu->is_uncore)
return false;
perf_cpu_map__for_each_cpu(cpu, idx, counter->pmu->cpus) {
struct aggr_cpu_id own_id = config->aggr_get_id(config, cpu);
if (aggr_cpu_id__equal(id, &own_id))
return false;
}
return true;
}
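/*
 * Illustration (hypothetical setup): with -A (AGGR_NONE) and an uncore PMU
 * whose cpumask contains only CPU 0, the counter is read on CPU 0 alone.
 * For every other CPU's aggregation id, no CPU in the PMU cpumask compares
 * equal in the loop above, so the inevitable zero value for those rows is
 * skipped rather than printed.
 */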
static void print_counter_aggrdata(struct perf_stat_config *config,
struct evsel *counter, int aggr_idx,
struct outstate *os)
{
FILE *output = config->output;
u64 ena, run, val;
double uval;
struct perf_stat_evsel *ps = counter->stats;
struct perf_stat_aggr *aggr = &ps->aggr[aggr_idx];
struct aggr_cpu_id id = config->aggr_map->map[aggr_idx];
double avg = aggr->counts.val;
bool metric_only = config->metric_only;
os->id = id;
os->aggr_nr = aggr->nr;
os->evsel = counter;
/* Skip already merged uncore/hybrid events */
if (counter->merged_stat)
return;
uniquify_counter(config, counter);
val = aggr->counts.val;
ena = aggr->counts.ena;
run = aggr->counts.run;
if (perf_stat__skip_metric_event(counter, &config->metric_events, ena, run))
return;
if (val == 0 && should_skip_zero_counter(config, counter, &id))
return;
if (!metric_only) {
if (config->json_output)
fputc('{', output);
if (os->prefix)
fprintf(output, "%s", os->prefix);
else if (config->summary && config->csv_output &&
!config->no_csv_summary && !config->interval)
fprintf(output, "%s%s", "summary", config->csv_sep);
}
uval = val * counter->scale;
printout(config, os, uval, run, ena, avg, aggr_idx);
if (!metric_only)
fputc('\n', output);
}
static void print_metric_begin(struct perf_stat_config *config,
struct evlist *evlist,
struct outstate *os, int aggr_idx)
{
struct perf_stat_aggr *aggr;
struct aggr_cpu_id id;
struct evsel *evsel;
os->first = true;
if (!config->metric_only)
return;
if (config->json_output)
fputc('{', config->output);
if (os->prefix)
fprintf(config->output, "%s", os->prefix);
evsel = evlist__first(evlist);
id = config->aggr_map->map[aggr_idx];
aggr = &evsel->stats->aggr[aggr_idx];
aggr_printout(config, evsel, id, aggr->nr);
print_cgroup(config, os->cgrp ? : evsel->cgrp);
}
static void print_metric_end(struct perf_stat_config *config, struct outstate *os)
{
FILE *output = config->output;
if (!config->metric_only)
return;
if (config->json_output) {
if (os->first)
fputs("\"metric-value\" : \"none\"", output);
fputc('}', output);
}
fputc('\n', output);
}
static void print_aggr(struct perf_stat_config *config,
struct evlist *evlist,
struct outstate *os)
{
struct evsel *counter;
int aggr_idx;
if (!config->aggr_map || !config->aggr_get_id)
return;
/*
* With metric_only everything is on a single line.
* Without each counter has its own line.
*/
cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
print_metric_begin(config, evlist, os, aggr_idx);
evlist__for_each_entry(evlist, counter) {
print_counter_aggrdata(config, counter, aggr_idx, os);
}
print_metric_end(config, os);
}
}
static void print_aggr_cgroup(struct perf_stat_config *config,
struct evlist *evlist,
struct outstate *os)
{
struct evsel *counter, *evsel;
int aggr_idx;
if (!config->aggr_map || !config->aggr_get_id)
return;
evlist__for_each_entry(evlist, evsel) {
if (os->cgrp == evsel->cgrp)
continue;
os->cgrp = evsel->cgrp;
cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
print_metric_begin(config, evlist, os, aggr_idx);
evlist__for_each_entry(evlist, counter) {
if (counter->cgrp != os->cgrp)
continue;
print_counter_aggrdata(config, counter, aggr_idx, os);
}
print_metric_end(config, os);
}
}
}
static void print_counter(struct perf_stat_config *config,
struct evsel *counter, struct outstate *os)
{
int aggr_idx;
/* AGGR_THREAD doesn't have config->aggr_get_id */
if (!config->aggr_map)
return;
cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
print_counter_aggrdata(config, counter, aggr_idx, os);
}
}
static void print_no_aggr_metric(struct perf_stat_config *config,
struct evlist *evlist,
struct outstate *os)
{
int all_idx;
struct perf_cpu cpu;
perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
struct evsel *counter;
bool first = true;
evlist__for_each_entry(evlist, counter) {
u64 ena, run, val;
double uval;
struct perf_stat_evsel *ps = counter->stats;
int aggr_idx = perf_cpu_map__idx(evsel__cpus(counter), cpu);
if (aggr_idx < 0)
continue;
os->evsel = counter;
os->id = aggr_cpu_id__cpu(cpu, /*data=*/NULL);
if (first) {
print_metric_begin(config, evlist, os, aggr_idx);
first = false;
}
val = ps->aggr[aggr_idx].counts.val;
ena = ps->aggr[aggr_idx].counts.ena;
run = ps->aggr[aggr_idx].counts.run;
uval = val * counter->scale;
printout(config, os, uval, run, ena, 1.0, aggr_idx);
}
if (!first)
print_metric_end(config, os);
}
}
static void print_metric_headers_std(struct perf_stat_config *config,
bool no_indent)
{
fputc(' ', config->output);
if (!no_indent) {
int len = aggr_header_lens[config->aggr_mode];
if (nr_cgroups || config->cgroup_list)
len += CGROUP_LEN + 1;
fprintf(config->output, "%*s", len, "");
}
}
static void print_metric_headers_csv(struct perf_stat_config *config,
bool no_indent __maybe_unused)
{
if (config->interval)
fputs("time,", config->output);
if (!config->iostat_run)
fputs(aggr_header_csv[config->aggr_mode], config->output);
}
static void print_metric_headers_json(struct perf_stat_config *config __maybe_unused,
bool no_indent __maybe_unused)
{
}
static void print_metric_headers(struct perf_stat_config *config,
struct evlist *evlist, bool no_indent)
{
struct evsel *counter;
struct outstate os = {
.fh = config->output
};
struct perf_stat_output_ctx out = {
.ctx = &os,
.print_metric = print_metric_header,
.new_line = new_line_metric,
.force_header = true,
};
if (config->json_output)
print_metric_headers_json(config, no_indent);
else if (config->csv_output)
print_metric_headers_csv(config, no_indent);
else
print_metric_headers_std(config, no_indent);
if (config->iostat_run)
iostat_print_header_prefix(config);
if (config->cgroup_list)
os.cgrp = evlist__first(evlist)->cgrp;
/* Print metrics headers only */
evlist__for_each_entry(evlist, counter) {
os.evsel = counter;
perf_stat__print_shadow_stats(config, counter, 0,
0,
&out,
&config->metric_events);
}
if (!config->json_output)
fputc('\n', config->output);
}
static void prepare_interval(struct perf_stat_config *config,
char *prefix, size_t len, struct timespec *ts)
{
if (config->iostat_run)
return;
if (config->json_output)
scnprintf(prefix, len, "\"interval\" : %lu.%09lu, ",
(unsigned long) ts->tv_sec, ts->tv_nsec);
else if (config->csv_output)
scnprintf(prefix, len, "%lu.%09lu%s",
(unsigned long) ts->tv_sec, ts->tv_nsec, config->csv_sep);
else
scnprintf(prefix, len, "%6lu.%09lu ",
(unsigned long) ts->tv_sec, ts->tv_nsec);
}
static void print_header_interval_std(struct perf_stat_config *config,
struct target *_target __maybe_unused,
struct evlist *evlist,
int argc __maybe_unused,
const char **argv __maybe_unused)
{
FILE *output = config->output;
switch (config->aggr_mode) {
case AGGR_NODE:
case AGGR_SOCKET:
case AGGR_DIE:
case AGGR_CACHE:
case AGGR_CORE:
fprintf(output, "#%*s %-*s cpus",
INTERVAL_LEN - 1, "time",
aggr_header_lens[config->aggr_mode],
aggr_header_std[config->aggr_mode]);
break;
case AGGR_NONE:
fprintf(output, "#%*s %-*s",
INTERVAL_LEN - 1, "time",
aggr_header_lens[config->aggr_mode],
aggr_header_std[config->aggr_mode]);
break;
case AGGR_THREAD:
fprintf(output, "#%*s %*s-%-*s",
INTERVAL_LEN - 1, "time",
COMM_LEN, "comm", PID_LEN, "pid");
break;
case AGGR_GLOBAL:
default:
if (!config->iostat_run)
fprintf(output, "#%*s",
INTERVAL_LEN - 1, "time");
case AGGR_UNSET:
case AGGR_MAX:
break;
}
if (config->metric_only)
print_metric_headers(config, evlist, true);
else
fprintf(output, " %*s %*s events\n",
COUNTS_LEN, "counts", config->unit_width, "unit");
}
static void print_header_std(struct perf_stat_config *config,
struct target *_target, struct evlist *evlist,
int argc, const char **argv)
{
FILE *output = config->output;
int i;
fprintf(output, "\n");
fprintf(output, " Performance counter stats for ");
if (_target->bpf_str)
fprintf(output, "\'BPF program(s) %s", _target->bpf_str);
else if (_target->system_wide)
fprintf(output, "\'system wide");
else if (_target->cpu_list)
fprintf(output, "\'CPU(s) %s", _target->cpu_list);
else if (!target__has_task(_target)) {
fprintf(output, "\'%s", argv ? argv[0] : "pipe");
for (i = 1; argv && (i < argc); i++)
fprintf(output, " %s", argv[i]);
} else if (_target->pid)
fprintf(output, "process id \'%s", _target->pid);
else
fprintf(output, "thread id \'%s", _target->tid);
fprintf(output, "\'");
if (config->run_count > 1)
fprintf(output, " (%d runs)", config->run_count);
fprintf(output, ":\n\n");
if (config->metric_only)
print_metric_headers(config, evlist, false);
}
static void print_header_csv(struct perf_stat_config *config,
struct target *_target __maybe_unused,
struct evlist *evlist,
int argc __maybe_unused,
const char **argv __maybe_unused)
{
if (config->metric_only)
print_metric_headers(config, evlist, true);
}
static void print_header_json(struct perf_stat_config *config,
struct target *_target __maybe_unused,
struct evlist *evlist,
int argc __maybe_unused,
const char **argv __maybe_unused)
{
if (config->metric_only)
print_metric_headers(config, evlist, true);
}
static void print_header(struct perf_stat_config *config,
struct target *_target,
struct evlist *evlist,
int argc, const char **argv)
{
static int num_print_iv;
fflush(stdout);
if (config->interval_clear)
puts(CONSOLE_CLEAR);
if (num_print_iv == 0 || config->interval_clear) {
if (config->json_output)
print_header_json(config, _target, evlist, argc, argv);
else if (config->csv_output)
print_header_csv(config, _target, evlist, argc, argv);
else if (config->interval)
print_header_interval_std(config, _target, evlist, argc, argv);
else
print_header_std(config, _target, evlist, argc, argv);
}
if (num_print_iv++ == 25)
num_print_iv = 0;
}
static int get_precision(double num)
{
if (num > 1)
return 0;
return lround(ceil(-log10(num)));
}
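/*
 * Worked example with illustrative numbers: for a stddev of 0.004 seconds,
 * -log10(0.004) is about 2.40 and ceil() rounds it up to 3, i.e. three
 * decimal places are needed to resolve the inaccuracy. print_footer() below
 * adds two more digits and prints the elapsed time with five decimals.
 */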
static void print_table(struct perf_stat_config *config,
FILE *output, int precision, double avg)
{
char tmp[64];
int idx, indent = 0;
scnprintf(tmp, 64, " %17.*f", precision, avg);
while (tmp[indent] == ' ')
indent++;
fprintf(output, "%*s# Table of individual measurements:\n", indent, "");
for (idx = 0; idx < config->run_count; idx++) {
double run = (double) config->walltime_run[idx] / NSEC_PER_SEC;
int h, n = 1 + abs((int) (100.0 * (run - avg)/run) / 5);
fprintf(output, " %17.*f (%+.*f) ",
precision, run, precision, run - avg);
for (h = 0; h < n; h++)
fprintf(output, "#");
fprintf(output, "\n");
}
fprintf(output, "\n%*s# Final result:\n", indent, "");
}
static double timeval2double(struct timeval *t)
{
return t->tv_sec + (double) t->tv_usec/USEC_PER_SEC;
}
static void print_footer(struct perf_stat_config *config)
{
double avg = avg_stats(config->walltime_nsecs_stats) / NSEC_PER_SEC;
FILE *output = config->output;
if (config->interval || config->csv_output || config->json_output)
return;
if (!config->null_run)
fprintf(output, "\n");
if (config->run_count == 1) {
fprintf(output, " %17.9f seconds time elapsed", avg);
if (config->ru_display) {
double ru_utime = timeval2double(&config->ru_data.ru_utime);
double ru_stime = timeval2double(&config->ru_data.ru_stime);
fprintf(output, "\n\n");
fprintf(output, " %17.9f seconds user\n", ru_utime);
fprintf(output, " %17.9f seconds sys\n", ru_stime);
}
} else {
double sd = stddev_stats(config->walltime_nsecs_stats) / NSEC_PER_SEC;
/*
* Display at most 2 more significant
* digits than the stddev inaccuracy.
*/
int precision = get_precision(sd) + 2;
if (config->walltime_run_table)
print_table(config, output, precision, avg);
fprintf(output, " %17.*f +- %.*f seconds time elapsed",
precision, avg, precision, sd);
print_noise_pct(config, sd, avg, /*before_metric=*/false);
}
fprintf(output, "\n\n");
if (config->print_free_counters_hint && sysctl__nmi_watchdog_enabled())
fprintf(output,
"Some events weren't counted. Try disabling the NMI watchdog:\n"
" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
" perf stat ...\n"
" echo 1 > /proc/sys/kernel/nmi_watchdog\n");
if (config->print_mixed_hw_group_error)
fprintf(output,
"The events in group usually have to be from "
"the same PMU. Try reorganizing the group.\n");
}
static void print_percore(struct perf_stat_config *config,
struct evsel *counter, struct outstate *os)
{
bool metric_only = config->metric_only;
FILE *output = config->output;
struct cpu_aggr_map *core_map;
int aggr_idx, core_map_len = 0;
if (!config->aggr_map || !config->aggr_get_id)
return;
if (config->percore_show_thread)
return print_counter(config, counter, os);
/*
* core_map will hold the aggr_cpu_id for the cores that have been
* printed so that each core is printed just once.
*/
core_map = cpu_aggr_map__empty_new(config->aggr_map->nr);
if (core_map == NULL) {
fprintf(output, "Cannot allocate per-core aggr map for display\n");
return;
}
cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
struct perf_cpu curr_cpu = config->aggr_map->map[aggr_idx].cpu;
struct aggr_cpu_id core_id = aggr_cpu_id__core(curr_cpu, NULL);
bool found = false;
for (int i = 0; i < core_map_len; i++) {
if (aggr_cpu_id__equal(&core_map->map[i], &core_id)) {
found = true;
break;
}
}
if (found)
continue;
print_counter_aggrdata(config, counter, aggr_idx, os);
core_map->map[core_map_len++] = core_id;
}
free(core_map);
if (metric_only)
fputc('\n', output);
}
static void print_cgroup_counter(struct perf_stat_config *config, struct evlist *evlist,
struct outstate *os)
{
struct evsel *counter;
evlist__for_each_entry(evlist, counter) {
if (os->cgrp != counter->cgrp) {
if (os->cgrp != NULL)
print_metric_end(config, os);
os->cgrp = counter->cgrp;
print_metric_begin(config, evlist, os, /*aggr_idx=*/0);
}
print_counter(config, counter, os);
}
if (os->cgrp)
print_metric_end(config, os);
}
void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config,
struct target *_target, struct timespec *ts,
int argc, const char **argv)
{
bool metric_only = config->metric_only;
int interval = config->interval;
struct evsel *counter;
char buf[64];
struct outstate os = {
.fh = config->output,
.first = true,
};
if (config->iostat_run)
evlist->selected = evlist__first(evlist);
if (interval) {
os.prefix = buf;
prepare_interval(config, buf, sizeof(buf), ts);
}
print_header(config, _target, evlist, argc, argv);
switch (config->aggr_mode) {
case AGGR_CORE:
case AGGR_CACHE:
case AGGR_DIE:
case AGGR_SOCKET:
case AGGR_NODE:
if (config->cgroup_list)
print_aggr_cgroup(config, evlist, &os);
else
print_aggr(config, evlist, &os);
break;
case AGGR_THREAD:
case AGGR_GLOBAL:
if (config->iostat_run) {
iostat_print_counters(evlist, config, ts, buf,
(iostat_print_counter_t)print_counter, &os);
} else if (config->cgroup_list) {
print_cgroup_counter(config, evlist, &os);
} else {
print_metric_begin(config, evlist, &os, /*aggr_idx=*/0);
evlist__for_each_entry(evlist, counter) {
print_counter(config, counter, &os);
}
print_metric_end(config, &os);
}
break;
case AGGR_NONE:
if (metric_only)
print_no_aggr_metric(config, evlist, &os);
else {
evlist__for_each_entry(evlist, counter) {
if (counter->percore)
print_percore(config, counter, &os);
else
print_counter(config, counter, &os);
}
}
break;
case AGGR_MAX:
case AGGR_UNSET:
default:
break;
}
print_footer(config);
fflush(config->output);
}
| linux-master | tools/perf/util/stat-display.c |
// SPDX-License-Identifier: GPL-2.0
/*
* bpf-prologue.c
*
* Copyright (C) 2015 He Kuang <[email protected]>
* Copyright (C) 2015 Wang Nan <[email protected]>
* Copyright (C) 2015 Huawei Inc.
*/
#include <bpf/libbpf.h>
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-finder.h"
#include <errno.h>
#include <stdlib.h>
#include <dwarf-regs.h>
#include <linux/filter.h>
#define BPF_REG_SIZE 8
#define JMP_TO_ERROR_CODE -1
#define JMP_TO_SUCCESS_CODE -2
#define JMP_TO_USER_CODE -3
struct bpf_insn_pos {
struct bpf_insn *begin;
struct bpf_insn *end;
struct bpf_insn *pos;
};
static inline int
pos_get_cnt(struct bpf_insn_pos *pos)
{
return pos->pos - pos->begin;
}
static int
append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
{
if (!pos->pos)
return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
if (pos->pos + 1 >= pos->end) {
pr_err("bpf prologue: prologue too long\n");
pos->pos = NULL;
return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
}
*(pos->pos)++ = new_insn;
return 0;
}
static int
check_pos(struct bpf_insn_pos *pos)
{
if (!pos->pos || pos->pos >= pos->end)
return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
return 0;
}
/*
* Convert type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see
* Documentation/trace/kprobetrace.rst) to size field of BPF_LDX_MEM
* instruction (BPF_{B,H,W,DW}).
*/
static int
argtype_to_ldx_size(const char *type)
{
int arg_size = type ? atoi(&type[1]) : 64;
switch (arg_size) {
case 8:
return BPF_B;
case 16:
return BPF_H;
case 32:
return BPF_W;
case 64:
default:
return BPF_DW;
}
}
static const char *
insn_sz_to_str(int insn_sz)
{
switch (insn_sz) {
case BPF_B:
return "BPF_B";
case BPF_H:
return "BPF_H";
case BPF_W:
return "BPF_W";
case BPF_DW:
return "BPF_DW";
default:
return "UNKNOWN";
}
}
/* Give it a shorter name */
#define ins(i, p) append_insn((i), (p))
/*
* Give a register name (in 'reg'), generate instruction to
* load register into an eBPF register rd:
* 'ldd target_reg, offset(ctx_reg)', where:
* ctx_reg is pre initialized to pointer of 'struct pt_regs'.
*/
static int
gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
const char *reg, int target_reg)
{
int offset = regs_query_register_offset(reg);
if (offset < 0) {
pr_err("bpf: prologue: failed to get register %s\n",
reg);
return offset;
}
ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);
return check_pos(pos);
}
/*
* Generate a BPF_FUNC_probe_read function call.
*
* src_base_addr_reg is a register holding base address,
* dst_addr_reg is a register holding dest address (on stack),
* result is:
*
* *[dst_addr_reg] = *([src_base_addr_reg] + offset)
*
* Arguments of BPF_FUNC_probe_read:
* ARG1: ptr to stack (dest)
* ARG2: size (8)
* ARG3: unsafe ptr (src)
*/
static int
gen_read_mem(struct bpf_insn_pos *pos,
int src_base_addr_reg,
int dst_addr_reg,
long offset,
int probeid)
{
/* mov arg3, src_base_addr_reg */
if (src_base_addr_reg != BPF_REG_ARG3)
ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
/* add arg3, #offset */
if (offset)
ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);
/* mov arg2, #reg_size */
ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);
/* mov arg1, dst_addr_reg */
if (dst_addr_reg != BPF_REG_ARG1)
ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);
/* Call probe_read */
ins(BPF_EMIT_CALL(probeid), pos);
/*
* Error processing: if read fail, goto error code,
* will be relocated. Target should be the start of
* error processing code.
*/
ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
pos);
return check_pos(pos);
}
/*
* Each arg should be bare register. Fetch and save them into argument
* registers (r3 - r5).
*
* BPF_REG_1 should have been initialized with pointer to
* 'struct pt_regs'.
*/
static int
gen_prologue_fastpath(struct bpf_insn_pos *pos,
struct probe_trace_arg *args, int nargs)
{
int i, err = 0;
for (i = 0; i < nargs; i++) {
err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
BPF_PROLOGUE_START_ARG_REG + i);
if (err)
goto errout;
}
return check_pos(pos);
errout:
return err;
}
/*
* Slow path:
* At least one argument has the form of 'offset($rx)'.
*
* The following code first stores them onto the stack, then loads all of
* them into r2 - r5.
* Before final loading, the final result should be:
*
* low address
* BPF_REG_FP - 24 ARG3
* BPF_REG_FP - 16 ARG2
* BPF_REG_FP - 8 ARG1
* BPF_REG_FP
* high address
*
* For each argument (described as: offn(...off2(off1(reg)))),
* the following code is generated:
*
* r7 <- fp
* r7 <- r7 - stack_offset // Ideal code should initialize r7 using
* // fp before generating args. However,
* // eBPF won't regard r7 as stack pointer
* // if it is generated by minus 8 from
* // another stack pointer except fp.
* // This is why we have to set r7
* // to fp for each variable.
* r3 <- value of 'reg'-> generated using gen_ldx_reg_from_ctx()
* (r7) <- r3 // skip following instructions for bare reg
* r3 <- r3 + off1 . // skip if off1 == 0
* r2 <- 8 \
* r1 <- r7 |-> generated by gen_read_mem()
* call probe_read /
* jnei r0, 0, err ./
* r3 <- (r7)
* r3 <- r3 + off2 . // skip if off2 == 0
* r2 <- 8 \ // r2 may be broken by probe_read, so set again
* r1 <- r7 |-> generated by gen_read_mem()
* call probe_read /
* jnei r0, 0, err ./
* ...
*/
static int
gen_prologue_slowpath(struct bpf_insn_pos *pos,
struct probe_trace_arg *args, int nargs)
{
int err, i, probeid;
for (i = 0; i < nargs; i++) {
struct probe_trace_arg *arg = &args[i];
const char *reg = arg->value;
struct probe_trace_arg_ref *ref = NULL;
int stack_offset = (i + 1) * -8;
pr_debug("prologue: fetch arg %d, base reg is %s\n",
i, reg);
/* value of base register is stored into ARG3 */
err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
BPF_REG_ARG3);
if (err) {
pr_err("prologue: failed to get offset of register %s\n",
reg);
goto errout;
}
/* Make r7 the stack pointer. */
ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
/* r7 += -8 */
ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);
/*
* Store r3 (base register) onto stack
* Ensure fp[offset] is set.
* fp is the only valid base register when storing
* into stack. We are not allowed to use r7 as base
* register here.
*/
ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
stack_offset), pos);
ref = arg->ref;
probeid = BPF_FUNC_probe_read_kernel;
while (ref) {
pr_debug("prologue: arg %d: offset %ld\n",
i, ref->offset);
if (ref->user_access)
probeid = BPF_FUNC_probe_read_user;
err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
ref->offset, probeid);
if (err) {
pr_err("prologue: failed to generate probe_read function call\n");
goto errout;
}
ref = ref->next;
/*
* Load previous result into ARG3. Use
* BPF_REG_FP instead of r7 because verifier
* allows FP based addressing only.
*/
if (ref)
ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
BPF_REG_FP, stack_offset), pos);
}
}
/* Final pass: read to registers */
for (i = 0; i < nargs; i++) {
int insn_sz = (args[i].ref) ? argtype_to_ldx_size(args[i].type) : BPF_DW;
pr_debug("prologue: load arg %d, insn_sz is %s\n",
i, insn_sz_to_str(insn_sz));
ins(BPF_LDX_MEM(insn_sz, BPF_PROLOGUE_START_ARG_REG + i,
BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);
}
ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);
return check_pos(pos);
errout:
return err;
}
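/*
 * Illustration with a hypothetical probe argument "+8(%di)" (base register
 * %di, one dereference at offset 8). The slow path above roughly emits:
 *
 *   r3 <- ctx->di                         (gen_ldx_reg_from_ctx)
 *   r7 <- fp; r7 += -8                    (per-argument stack slot)
 *   fp[-8] <- r3                          (save the base value)
 *   r3 += 8; r2 <- 8; r1 <- r7
 *   call probe_read_kernel                (fp[-8] = *(di + 8))
 *   jne r0, 0, <error code>
 *
 * The final pass then reloads fp[-8] into the first argument register with
 * the width returned by argtype_to_ldx_size().
 */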
static int
prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
struct bpf_insn *success_code, struct bpf_insn *user_code)
{
struct bpf_insn *insn;
if (check_pos(pos))
return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
for (insn = pos->begin; insn < pos->pos; insn++) {
struct bpf_insn *target;
u8 class = BPF_CLASS(insn->code);
u8 opcode;
if (class != BPF_JMP)
continue;
opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL)
continue;
switch (insn->off) {
case JMP_TO_ERROR_CODE:
target = error_code;
break;
case JMP_TO_SUCCESS_CODE:
target = success_code;
break;
case JMP_TO_USER_CODE:
target = user_code;
break;
default:
pr_err("bpf prologue: internal error: relocation failed\n");
return -BPF_LOADER_ERRNO__PROLOGUE;
}
insn->off = target - (insn + 1);
}
return 0;
}
int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
struct bpf_insn *new_prog, size_t *new_cnt,
size_t cnt_space)
{
struct bpf_insn *success_code = NULL;
struct bpf_insn *error_code = NULL;
struct bpf_insn *user_code = NULL;
struct bpf_insn_pos pos;
bool fastpath = true;
int err = 0, i;
if (!new_prog || !new_cnt)
return -EINVAL;
if (cnt_space > BPF_MAXINSNS)
cnt_space = BPF_MAXINSNS;
pos.begin = new_prog;
pos.end = new_prog + cnt_space;
pos.pos = new_prog;
if (!nargs) {
ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
&pos);
if (check_pos(&pos))
goto errout;
*new_cnt = pos_get_cnt(&pos);
return 0;
}
if (nargs > BPF_PROLOGUE_MAX_ARGS) {
pr_warning("bpf: prologue: %d arguments are dropped\n",
nargs - BPF_PROLOGUE_MAX_ARGS);
nargs = BPF_PROLOGUE_MAX_ARGS;
}
/* First pass: validation */
for (i = 0; i < nargs; i++) {
struct probe_trace_arg_ref *ref = args[i].ref;
if (args[i].value[0] == '@') {
/* TODO: fetch global variable */
pr_err("bpf: prologue: global %s%+ld not support\n",
args[i].value, ref ? ref->offset : 0);
return -ENOTSUP;
}
while (ref) {
/* fastpath is true if all args have ref == NULL */
fastpath = false;
/*
* Instruction encodes immediate value using
* s32, while ref->offset is a long. On systems where a long
* cannot fit in an s32, refuse to process if ref->offset is
* too large (or too small).
*/
#ifdef __LP64__
#define OFFSET_MAX ((1LL << 31) - 1)
#define OFFSET_MIN ((1LL << 31) * -1)
if (ref->offset > OFFSET_MAX ||
ref->offset < OFFSET_MIN) {
pr_err("bpf: prologue: offset out of bound: %ld\n",
ref->offset);
return -BPF_LOADER_ERRNO__PROLOGUEOOB;
}
#endif
ref = ref->next;
}
}
pr_debug("prologue: pass validation\n");
if (fastpath) {
/* If all variables are registers... */
pr_debug("prologue: fast path\n");
err = gen_prologue_fastpath(&pos, args, nargs);
if (err)
goto errout;
} else {
pr_debug("prologue: slow path\n");
/* Initialization: move ctx to a callee saved register. */
ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);
err = gen_prologue_slowpath(&pos, args, nargs);
if (err)
goto errout;
/*
* start of ERROR_CODE (only slow pass needs error code)
* mov r2 <- 1 // r2 is error number
* mov r3 <- 0 // r3, r4... must be initialized or
* // the verifier would complain
* mov r4 <- 0
* ...
* goto usercode
*/
error_code = pos.pos;
ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
&pos);
for (i = 0; i < nargs; i++)
ins(BPF_ALU64_IMM(BPF_MOV,
BPF_PROLOGUE_START_ARG_REG + i,
0),
&pos);
ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
&pos);
}
/*
* start of SUCCESS_CODE:
* mov r2 <- 0
* goto usercode // skip
*/
success_code = pos.pos;
ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);
/*
* start of USER_CODE:
* Restore ctx to r1
*/
user_code = pos.pos;
if (!fastpath) {
/*
* Only the slow path needs to restore ctx. In the fast path,
* registers are loaded directly from r1.
*/
ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
err = prologue_relocate(&pos, error_code, success_code,
user_code);
if (err)
goto errout;
}
err = check_pos(&pos);
if (err)
goto errout;
*new_cnt = pos_get_cnt(&pos);
return 0;
errout:
return err;
}
| linux-master | tools/perf/util/bpf-prologue.c |
// SPDX-License-Identifier: GPL-2.0
/*
* perf_hooks.c
*
* Copyright (C) 2016 Wang Nan <[email protected]>
* Copyright (C) 2016 Huawei Inc.
*/
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <setjmp.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include "util/debug.h"
#include "util/perf-hooks.h"
static sigjmp_buf jmpbuf;
static const struct perf_hook_desc *current_perf_hook;
void perf_hooks__invoke(const struct perf_hook_desc *desc)
{
if (!(desc && desc->p_hook_func && *desc->p_hook_func))
return;
if (sigsetjmp(jmpbuf, 1)) {
pr_warning("Fatal error (SEGFAULT) in perf hook '%s'\n",
desc->hook_name);
*(current_perf_hook->p_hook_func) = NULL;
} else {
current_perf_hook = desc;
(**desc->p_hook_func)(desc->hook_ctx);
}
current_perf_hook = NULL;
}
void perf_hooks__recover(void)
{
if (current_perf_hook)
siglongjmp(jmpbuf, 1);
}
#define PERF_HOOK(name) \
perf_hook_func_t __perf_hook_func_##name = NULL; \
struct perf_hook_desc __perf_hook_desc_##name = \
{.hook_name = #name, \
.p_hook_func = &__perf_hook_func_##name, \
.hook_ctx = NULL};
#include "perf-hooks-list.h"
#undef PERF_HOOK
#define PERF_HOOK(name) \
&__perf_hook_desc_##name,
static struct perf_hook_desc *perf_hooks[] = {
#include "perf-hooks-list.h"
};
#undef PERF_HOOK
int perf_hooks__set_hook(const char *hook_name,
perf_hook_func_t hook_func,
void *hook_ctx)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(perf_hooks); i++) {
if (strcmp(hook_name, perf_hooks[i]->hook_name) != 0)
continue;
if (*(perf_hooks[i]->p_hook_func))
pr_warning("Overwrite existing hook: %s\n", hook_name);
*(perf_hooks[i]->p_hook_func) = hook_func;
perf_hooks[i]->hook_ctx = hook_ctx;
return 0;
}
return -ENOENT;
}
perf_hook_func_t perf_hooks__get_hook(const char *hook_name)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(perf_hooks); i++) {
if (strcmp(hook_name, perf_hooks[i]->hook_name) != 0)
continue;
return *(perf_hooks[i]->p_hook_func);
}
return ERR_PTR(-ENOENT);
}
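/*
 * Minimal usage sketch. The hook name "test" and the callback below are
 * assumptions for illustration; real hook names come from perf-hooks-list.h.
 *
 *	static void my_hook(void *ctx)
 *	{
 *		pr_debug("hook fired, ctx=%p\n", ctx);
 *	}
 *
 *	if (!perf_hooks__set_hook("test", my_hook, NULL)) {
 *		perf_hook_func_t fn = perf_hooks__get_hook("test");
 *
 *		if (!IS_ERR(fn) && fn)
 *			fn(NULL);
 *	}
 */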
| linux-master | tools/perf/util/perf-hooks.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include "perf-sys.h"
#include "util/cloexec.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/parse-events.h"
#include "util/perf_api_probe.h"
#include <perf/cpumap.h>
#include <errno.h>
typedef void (*setup_probe_fn_t)(struct evsel *evsel);
static int perf_do_probe_api(setup_probe_fn_t fn, struct perf_cpu cpu, const char *str)
{
struct evlist *evlist;
struct evsel *evsel;
unsigned long flags = perf_event_open_cloexec_flag();
int err = -EAGAIN, fd;
static pid_t pid = -1;
evlist = evlist__new();
if (!evlist)
return -ENOMEM;
if (parse_event(evlist, str))
goto out_delete;
evsel = evlist__first(evlist);
while (1) {
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);
if (fd < 0) {
if (pid == -1 && errno == EACCES) {
pid = 0;
continue;
}
goto out_delete;
}
break;
}
close(fd);
fn(evsel);
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);
if (fd < 0) {
if (errno == EINVAL)
err = -EINVAL;
goto out_delete;
}
close(fd);
err = 0;
out_delete:
evlist__delete(evlist);
return err;
}
static bool perf_probe_api(setup_probe_fn_t fn)
{
const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
struct perf_cpu_map *cpus;
struct perf_cpu cpu;
int ret, i = 0;
cpus = perf_cpu_map__new(NULL);
if (!cpus)
return false;
cpu = perf_cpu_map__cpu(cpus, 0);
perf_cpu_map__put(cpus);
do {
ret = perf_do_probe_api(fn, cpu, try[i++]);
if (!ret)
return true;
} while (ret == -EAGAIN && try[i]);
return false;
}
static void perf_probe_sample_identifier(struct evsel *evsel)
{
evsel->core.attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}
static void perf_probe_comm_exec(struct evsel *evsel)
{
evsel->core.attr.comm_exec = 1;
}
static void perf_probe_context_switch(struct evsel *evsel)
{
evsel->core.attr.context_switch = 1;
}
static void perf_probe_text_poke(struct evsel *evsel)
{
evsel->core.attr.text_poke = 1;
}
static void perf_probe_build_id(struct evsel *evsel)
{
evsel->core.attr.build_id = 1;
}
static void perf_probe_cgroup(struct evsel *evsel)
{
evsel->core.attr.cgroup = 1;
}
bool perf_can_sample_identifier(void)
{
return perf_probe_api(perf_probe_sample_identifier);
}
bool perf_can_comm_exec(void)
{
return perf_probe_api(perf_probe_comm_exec);
}
bool perf_can_record_switch_events(void)
{
return perf_probe_api(perf_probe_context_switch);
}
bool perf_can_record_text_poke_events(void)
{
return perf_probe_api(perf_probe_text_poke);
}
bool perf_can_record_cpu_wide(void)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
.exclude_kernel = 1,
};
struct perf_cpu_map *cpus;
struct perf_cpu cpu;
int fd;
cpus = perf_cpu_map__new(NULL);
if (!cpus)
return false;
cpu = perf_cpu_map__cpu(cpus, 0);
perf_cpu_map__put(cpus);
fd = sys_perf_event_open(&attr, -1, cpu.cpu, -1, 0);
if (fd < 0)
return false;
close(fd);
return true;
}
/*
* Architectures are expected to know if AUX area sampling is supported by the
* hardware. Here we check for kernel support.
*/
bool perf_can_aux_sample(void)
{
struct perf_event_attr attr = {
.size = sizeof(struct perf_event_attr),
.exclude_kernel = 1,
/*
* Non-zero value causes the kernel to calculate the effective
* attribute size up to that byte.
*/
.aux_sample_size = 1,
};
int fd;
fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
/*
* If the kernel attribute is big enough to contain aux_sample_size
* then we assume that it is supported. We are relying on the kernel to
* validate the attribute size before anything else that could be wrong.
*/
if (fd < 0 && errno == E2BIG)
return false;
if (fd >= 0)
close(fd);
return true;
}
bool perf_can_record_build_id(void)
{
return perf_probe_api(perf_probe_build_id);
}
bool perf_can_record_cgroup(void)
{
return perf_probe_api(perf_probe_cgroup);
}
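/*
 * Usage sketch: callers such as the record command use these probes to
 * decide which optional attribute bits are safe to request. The evsel
 * below is a hypothetical example:
 *
 *	if (perf_can_record_switch_events())
 *		evsel->core.attr.context_switch = 1;
 *	if (perf_can_sample_identifier())
 *		evsel->core.attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
 */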
| linux-master | tools/perf/util/perf_api_probe.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* System call table mapper
*
* (C) 2016 Arnaldo Carvalho de Melo <[email protected]>
*/
#include "syscalltbl.h"
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/zalloc.h>
#ifdef HAVE_SYSCALL_TABLE_SUPPORT
#include <string.h>
#include "string2.h"
#if defined(__x86_64__)
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_x86_64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_x86_64;
#elif defined(__s390x__)
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_S390_64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_s390_64;
#elif defined(__powerpc64__)
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_powerpc_64;
#elif defined(__powerpc__)
#include <asm/syscalls_32.c>
const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_32_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_powerpc_32;
#elif defined(__aarch64__)
#include <asm/syscalls.c>
const int syscalltbl_native_max_id = SYSCALLTBL_ARM64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_arm64;
#elif defined(__mips__)
#include <asm/syscalls_n64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_MIPS_N64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_mips_n64;
#elif defined(__loongarch__)
#include <asm/syscalls.c>
const int syscalltbl_native_max_id = SYSCALLTBL_LOONGARCH_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_loongarch;
#endif
struct syscall {
int id;
const char *name;
};
static int syscallcmpname(const void *vkey, const void *ventry)
{
const char *key = vkey;
const struct syscall *entry = ventry;
return strcmp(key, entry->name);
}
static int syscallcmp(const void *va, const void *vb)
{
const struct syscall *a = va, *b = vb;
return strcmp(a->name, b->name);
}
static int syscalltbl__init_native(struct syscalltbl *tbl)
{
int nr_entries = 0, i, j;
struct syscall *entries;
for (i = 0; i <= syscalltbl_native_max_id; ++i)
if (syscalltbl_native[i])
++nr_entries;
entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);
if (tbl->syscalls.entries == NULL)
return -1;
for (i = 0, j = 0; i <= syscalltbl_native_max_id; ++i) {
if (syscalltbl_native[i]) {
entries[j].name = syscalltbl_native[i];
entries[j].id = i;
++j;
}
}
qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);
tbl->syscalls.nr_entries = nr_entries;
tbl->syscalls.max_id = syscalltbl_native_max_id;
return 0;
}
struct syscalltbl *syscalltbl__new(void)
{
struct syscalltbl *tbl = malloc(sizeof(*tbl));
if (tbl) {
if (syscalltbl__init_native(tbl)) {
free(tbl);
return NULL;
}
}
return tbl;
}
void syscalltbl__delete(struct syscalltbl *tbl)
{
zfree(&tbl->syscalls.entries);
free(tbl);
}
const char *syscalltbl__name(const struct syscalltbl *tbl __maybe_unused, int id)
{
return id <= syscalltbl_native_max_id ? syscalltbl_native[id] : NULL;
}
int syscalltbl__id(struct syscalltbl *tbl, const char *name)
{
struct syscall *sc = bsearch(name, tbl->syscalls.entries,
tbl->syscalls.nr_entries, sizeof(*sc),
syscallcmpname);
return sc ? sc->id : -1;
}
int syscalltbl__strglobmatch_next(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
{
int i;
struct syscall *syscalls = tbl->syscalls.entries;
for (i = *idx + 1; i < tbl->syscalls.nr_entries; ++i) {
if (strglobmatch(syscalls[i].name, syscall_glob)) {
*idx = i;
return syscalls[i].id;
}
}
return -1;
}
int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
{
*idx = -1;
return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
}
#else /* HAVE_SYSCALL_TABLE_SUPPORT */
#include <libaudit.h>
struct syscalltbl *syscalltbl__new(void)
{
struct syscalltbl *tbl = zalloc(sizeof(*tbl));
if (tbl)
tbl->audit_machine = audit_detect_machine();
return tbl;
}
void syscalltbl__delete(struct syscalltbl *tbl)
{
free(tbl);
}
const char *syscalltbl__name(const struct syscalltbl *tbl, int id)
{
return audit_syscall_to_name(id, tbl->audit_machine);
}
int syscalltbl__id(struct syscalltbl *tbl, const char *name)
{
return audit_name_to_syscall(name, tbl->audit_machine);
}
int syscalltbl__strglobmatch_next(struct syscalltbl *tbl __maybe_unused,
const char *syscall_glob __maybe_unused, int *idx __maybe_unused)
{
return -1;
}
int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
{
return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
}
#endif /* HAVE_SYSCALL_TABLE_SUPPORT */
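/*
 * Usage sketch, assuming syscalltbl__new() succeeds. The syscall name
 * "openat" and the glob "*stat*" are arbitrary examples:
 *
 *	struct syscalltbl *tbl = syscalltbl__new();
 *	int idx, id = syscalltbl__id(tbl, "openat");
 *
 *	if (id >= 0)
 *		printf("openat = %d (%s)\n", id, syscalltbl__name(tbl, id));
 *	for (id = syscalltbl__strglobmatch_first(tbl, "*stat*", &idx);
 *	     id >= 0;
 *	     id = syscalltbl__strglobmatch_next(tbl, "*stat*", &idx))
 *		printf("  %s\n", syscalltbl__name(tbl, id));
 *	syscalltbl__delete(tbl);
 */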
| linux-master | tools/perf/util/syscalltbl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on intlist.c by:
* (c) 2009 Arnaldo Carvalho de Melo <[email protected]>
*/
#include <errno.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include "intlist.h"
static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused,
const void *entry)
{
unsigned long i = (unsigned long)entry;
struct rb_node *rc = NULL;
struct int_node *node = malloc(sizeof(*node));
if (node != NULL) {
node->i = i;
node->priv = NULL;
rc = &node->rb_node;
}
return rc;
}
static void int_node__delete(struct int_node *ilist)
{
free(ilist);
}
static void intlist__node_delete(struct rblist *rblist __maybe_unused,
struct rb_node *rb_node)
{
struct int_node *node = container_of(rb_node, struct int_node, rb_node);
int_node__delete(node);
}
static int intlist__node_cmp(struct rb_node *rb_node, const void *entry)
{
unsigned long i = (unsigned long)entry;
struct int_node *node = container_of(rb_node, struct int_node, rb_node);
if (node->i > i)
return 1;
else if (node->i < i)
return -1;
return 0;
}
int intlist__add(struct intlist *ilist, unsigned long i)
{
return rblist__add_node(&ilist->rblist, (void *)i);
}
void intlist__remove(struct intlist *ilist, struct int_node *node)
{
rblist__remove_node(&ilist->rblist, &node->rb_node);
}
static struct int_node *__intlist__findnew(struct intlist *ilist,
unsigned long i, bool create)
{
struct int_node *node = NULL;
struct rb_node *rb_node;
if (ilist == NULL)
return NULL;
if (create)
rb_node = rblist__findnew(&ilist->rblist, (void *)i);
else
rb_node = rblist__find(&ilist->rblist, (void *)i);
if (rb_node)
node = container_of(rb_node, struct int_node, rb_node);
return node;
}
struct int_node *intlist__find(struct intlist *ilist, unsigned long i)
{
return __intlist__findnew(ilist, i, false);
}
struct int_node *intlist__findnew(struct intlist *ilist, unsigned long i)
{
return __intlist__findnew(ilist, i, true);
}
static int intlist__parse_list(struct intlist *ilist, const char *s)
{
char *sep;
int err;
do {
unsigned long value = strtol(s, &sep, 10);
err = -EINVAL;
if (*sep != ',' && *sep != '\0')
break;
err = intlist__add(ilist, value);
if (err)
break;
s = sep + 1;
} while (*sep != '\0');
return err;
}
struct intlist *intlist__new(const char *slist)
{
struct intlist *ilist = malloc(sizeof(*ilist));
if (ilist != NULL) {
rblist__init(&ilist->rblist);
ilist->rblist.node_cmp = intlist__node_cmp;
ilist->rblist.node_new = intlist__node_new;
ilist->rblist.node_delete = intlist__node_delete;
if (slist && intlist__parse_list(ilist, slist))
goto out_delete;
}
return ilist;
out_delete:
intlist__delete(ilist);
return NULL;
}
void intlist__delete(struct intlist *ilist)
{
if (ilist != NULL)
rblist__delete(&ilist->rblist);
}
struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx)
{
struct int_node *node = NULL;
struct rb_node *rb_node;
rb_node = rblist__entry(&ilist->rblist, idx);
if (rb_node)
node = container_of(rb_node, struct int_node, rb_node);
return node;
}
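/*
 * Usage sketch ("1,5,9" is an arbitrary example list):
 *
 *	struct intlist *ilist = intlist__new("1,5,9");
 *
 *	if (ilist != NULL) {
 *		if (intlist__find(ilist, 5))
 *			pr_debug("5 is in the list\n");
 *		// intlist__entry() walks values in ascending order: index 0 is 1
 *		intlist__delete(ilist);
 *	}
 */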
| linux-master | tools/perf/util/intlist.c |
// SPDX-License-Identifier: GPL-2.0
#include "arm64-frame-pointer-unwind-support.h"
#include "callchain.h"
#include "event.h"
#include "perf_regs.h" // SMPL_REG_MASK
#include "unwind.h"
#define perf_event_arm_regs perf_event_arm64_regs
#include "../../arch/arm64/include/uapi/asm/perf_regs.h"
#undef perf_event_arm_regs
struct entries {
u64 stack[2];
size_t length;
};
static bool get_leaf_frame_caller_enabled(struct perf_sample *sample)
{
return callchain_param.record_mode == CALLCHAIN_FP && sample->user_regs.regs
&& sample->user_regs.mask & SMPL_REG_MASK(PERF_REG_ARM64_LR);
}
static int add_entry(struct unwind_entry *entry, void *arg)
{
struct entries *entries = arg;
entries->stack[entries->length++] = entry->ip;
return 0;
}
u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thread, int usr_idx)
{
int ret;
struct entries entries = {};
struct regs_dump old_regs = sample->user_regs;
if (!get_leaf_frame_caller_enabled(sample))
return 0;
/*
* If PC and SP are not recorded, get the value of PC from the callchain
* and set its mask. SP is not used when doing the unwinding but it
* still needs to be set to prevent failures.
*/
if (!(sample->user_regs.mask & SMPL_REG_MASK(PERF_REG_ARM64_PC))) {
sample->user_regs.cache_mask |= SMPL_REG_MASK(PERF_REG_ARM64_PC);
sample->user_regs.cache_regs[PERF_REG_ARM64_PC] = sample->callchain->ips[usr_idx+1];
}
if (!(sample->user_regs.mask & SMPL_REG_MASK(PERF_REG_ARM64_SP))) {
sample->user_regs.cache_mask |= SMPL_REG_MASK(PERF_REG_ARM64_SP);
sample->user_regs.cache_regs[PERF_REG_ARM64_SP] = 0;
}
ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);
sample->user_regs = old_regs;
if (ret || entries.length != 2)
return ret;
return callchain_param.order == ORDER_CALLER ? entries.stack[0] : entries.stack[1];
}
| linux-master | tools/perf/util/arm64-frame-pointer-unwind-support.c |
// SPDX-License-Identifier: GPL-2.0
/*
* HiSilicon PCIe Trace and Tuning (PTT) support
* Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
*/
#include <byteswap.h>
#include <endian.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <unistd.h>
#include "auxtrace.h"
#include "color.h"
#include "debug.h"
#include "evsel.h"
#include "hisi-ptt.h"
#include "hisi-ptt-decoder/hisi-ptt-pkt-decoder.h"
#include "machine.h"
#include "session.h"
#include "tool.h"
#include <internal/lib.h>
struct hisi_ptt {
struct auxtrace auxtrace;
u32 auxtrace_type;
struct perf_session *session;
struct machine *machine;
u32 pmu_type;
};
struct hisi_ptt_queue {
struct hisi_ptt *ptt;
struct auxtrace_buffer *buffer;
};
static enum hisi_ptt_pkt_type hisi_ptt_check_packet_type(unsigned char *buf)
{
uint32_t head = *(uint32_t *)buf;
if ((HISI_PTT_8DW_CHECK_MASK & head) == HISI_PTT_IS_8DW_PKT)
return HISI_PTT_8DW_PKT;
return HISI_PTT_4DW_PKT;
}
static void hisi_ptt_dump(struct hisi_ptt *ptt __maybe_unused,
unsigned char *buf, size_t len)
{
const char *color = PERF_COLOR_BLUE;
enum hisi_ptt_pkt_type type;
size_t pos = 0;
int pkt_len;
type = hisi_ptt_check_packet_type(buf);
len = round_down(len, hisi_ptt_pkt_size[type]);
color_fprintf(stdout, color, ". ... HISI PTT data: size %zu bytes\n",
len);
while (len > 0) {
pkt_len = hisi_ptt_pkt_desc(buf, pos, type);
if (!pkt_len)
color_fprintf(stdout, color, " Bad packet!\n");
pos += pkt_len;
len -= pkt_len;
}
}
static void hisi_ptt_dump_event(struct hisi_ptt *ptt, unsigned char *buf,
size_t len)
{
printf(".\n");
hisi_ptt_dump(ptt, buf, len);
}
static int hisi_ptt_process_event(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct perf_tool *tool __maybe_unused)
{
return 0;
}
static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool __maybe_unused)
{
struct hisi_ptt *ptt = container_of(session->auxtrace, struct hisi_ptt,
auxtrace);
int fd = perf_data__fd(session->data);
int size = event->auxtrace.size;
void *data = malloc(size);
off_t data_offset;
int err;
if (!data)
return -errno;
if (perf_data__is_pipe(session->data)) {
data_offset = 0;
} else {
data_offset = lseek(fd, 0, SEEK_CUR);
if (data_offset == -1) {
free(data);
return -errno;
}
}
err = readn(fd, data, size);
if (err != (ssize_t)size) {
free(data);
return -errno;
}
if (dump_trace)
hisi_ptt_dump_event(ptt, data, size);
free(data);
return 0;
}
static int hisi_ptt_flush(struct perf_session *session __maybe_unused,
struct perf_tool *tool __maybe_unused)
{
return 0;
}
static void hisi_ptt_free_events(struct perf_session *session __maybe_unused)
{
}
static void hisi_ptt_free(struct perf_session *session)
{
struct hisi_ptt *ptt = container_of(session->auxtrace, struct hisi_ptt,
auxtrace);
session->auxtrace = NULL;
free(ptt);
}
static bool hisi_ptt_evsel_is_auxtrace(struct perf_session *session,
struct evsel *evsel)
{
struct hisi_ptt *ptt = container_of(session->auxtrace, struct hisi_ptt, auxtrace);
return evsel->core.attr.type == ptt->pmu_type;
}
static void hisi_ptt_print_info(__u64 type)
{
if (!dump_trace)
return;
fprintf(stdout, " PMU Type %" PRId64 "\n", (s64) type);
}
int hisi_ptt_process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
struct hisi_ptt *ptt;
if (auxtrace_info->header.size < HISI_PTT_AUXTRACE_PRIV_SIZE +
sizeof(struct perf_record_auxtrace_info))
return -EINVAL;
ptt = zalloc(sizeof(*ptt));
if (!ptt)
return -ENOMEM;
ptt->session = session;
ptt->machine = &session->machines.host; /* No kvm support */
ptt->auxtrace_type = auxtrace_info->type;
ptt->pmu_type = auxtrace_info->priv[0];
ptt->auxtrace.process_event = hisi_ptt_process_event;
ptt->auxtrace.process_auxtrace_event = hisi_ptt_process_auxtrace_event;
ptt->auxtrace.flush_events = hisi_ptt_flush;
ptt->auxtrace.free_events = hisi_ptt_free_events;
ptt->auxtrace.free = hisi_ptt_free;
ptt->auxtrace.evsel_is_auxtrace = hisi_ptt_evsel_is_auxtrace;
session->auxtrace = &ptt->auxtrace;
hisi_ptt_print_info(auxtrace_info->priv[0]);
return 0;
}
| linux-master | tools/perf/util/hisi-ptt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* genelf.c
* Copyright (C) 2014, Google, Inc
*
* Contributed by:
* Stephane Eranian <[email protected]>
*/
#include <sys/types.h>
#include <stddef.h>
#include <libelf.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>
#include <fcntl.h>
#include <err.h>
#ifdef HAVE_DWARF_SUPPORT
#include <dwarf.h>
#endif
#include "genelf.h"
#include "../util/jitdump.h"
#include <linux/compiler.h>
#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif
#define BUILD_ID_URANDOM /* different uuid for each run */
#ifdef HAVE_LIBCRYPTO_SUPPORT
#define BUILD_ID_MD5
#undef BUILD_ID_SHA /* does not seem to work well when linked with Java */
#undef BUILD_ID_URANDOM /* different uuid for each run */
#ifdef BUILD_ID_SHA
#include <openssl/sha.h>
#endif
#ifdef BUILD_ID_MD5
#include <openssl/evp.h>
#include <openssl/md5.h>
#endif
#endif
typedef struct {
unsigned int namesz; /* Size of entry's owner string */
unsigned int descsz; /* Size of the note descriptor */
unsigned int type; /* Interpretation of the descriptor */
char name[0]; /* Start of the name+desc data */
} Elf_Note;
struct options {
char *output;
int fd;
};
static char shd_string_table[] = {
0,
'.', 't', 'e', 'x', 't', 0, /* 1 */
'.', 's', 'h', 's', 't', 'r', 't', 'a', 'b', 0, /* 7 */
'.', 's', 'y', 'm', 't', 'a', 'b', 0, /* 17 */
'.', 's', 't', 'r', 't', 'a', 'b', 0, /* 25 */
'.', 'n', 'o', 't', 'e', '.', 'g', 'n', 'u', '.', 'b', 'u', 'i', 'l', 'd', '-', 'i', 'd', 0, /* 33 */
'.', 'd', 'e', 'b', 'u', 'g', '_', 'l', 'i', 'n', 'e', 0, /* 52 */
'.', 'd', 'e', 'b', 'u', 'g', '_', 'i', 'n', 'f', 'o', 0, /* 64 */
'.', 'd', 'e', 'b', 'u', 'g', '_', 'a', 'b', 'b', 'r', 'e', 'v', 0, /* 76 */
'.', 'e', 'h', '_', 'f', 'r', 'a', 'm', 'e', '_', 'h', 'd', 'r', 0, /* 90 */
'.', 'e', 'h', '_', 'f', 'r', 'a', 'm', 'e', 0, /* 104 */
};
static struct buildid_note {
Elf_Note desc; /* descsz: size of build-id, must be multiple of 4 */
char name[4]; /* GNU\0 */
char build_id[20];
} bnote;
static Elf_Sym symtab[]={
/* symbol 0 MUST be the undefined symbol */
{ .st_name = 0, /* index in sym_string table */
.st_info = ELF_ST_TYPE(STT_NOTYPE),
.st_shndx = 0, /* for now */
.st_value = 0x0,
.st_other = ELF_ST_VIS(STV_DEFAULT),
.st_size = 0,
},
{ .st_name = 1, /* index in sym_string table */
.st_info = ELF_ST_BIND(STB_LOCAL) | ELF_ST_TYPE(STT_FUNC),
.st_shndx = 1,
.st_value = 0, /* for now */
.st_other = ELF_ST_VIS(STV_DEFAULT),
.st_size = 0, /* for now */
}
};
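/*
* Three alternative build-id generators follow: random bytes from
* /dev/urandom (the default), SHA1 of the code, or MD5 of the load
* address plus code, selected at build time via the macros above.
*/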
#ifdef BUILD_ID_URANDOM
static void
gen_build_id(struct buildid_note *note,
unsigned long load_addr __maybe_unused,
const void *code __maybe_unused,
size_t csize __maybe_unused)
{
int fd;
size_t sz = sizeof(note->build_id);
ssize_t sret;
fd = open("/dev/urandom", O_RDONLY);
if (fd == -1)
err(1, "cannot access /dev/urandom for buildid");
sret = read(fd, note->build_id, sz);
close(fd);
if (sret != (ssize_t)sz)
memset(note->build_id, 0, sz);
}
#endif
#ifdef BUILD_ID_SHA
static void
gen_build_id(struct buildid_note *note,
unsigned long load_addr __maybe_unused,
const void *code,
size_t csize)
{
if (sizeof(note->build_id) < SHA_DIGEST_LENGTH)
errx(1, "build_id too small for SHA1");
SHA1(code, csize, (unsigned char *)note->build_id);
}
#endif
#ifdef BUILD_ID_MD5
static void
gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *code, size_t csize)
{
EVP_MD_CTX *mdctx;
if (sizeof(note->build_id) < 16)
errx(1, "build_id too small for MD5");
mdctx = EVP_MD_CTX_new();
if (!mdctx)
errx(2, "failed to create EVP_MD_CTX");
EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
EVP_DigestUpdate(mdctx, &load_addr, sizeof(load_addr));
EVP_DigestUpdate(mdctx, code, csize);
EVP_DigestFinal_ex(mdctx, (unsigned char *)note->build_id, NULL);
EVP_MD_CTX_free(mdctx);
}
#endif
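/*
* Append .eh_frame and .eh_frame_hdr sections carrying the JIT'ed code's
* unwinding information; the header is expected at the end of the
* unwinding buffer, after the table itself.
*/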
static int
jit_add_eh_frame_info(Elf *e, void* unwinding, uint64_t unwinding_header_size,
uint64_t unwinding_size, uint64_t base_offset)
{
Elf_Data *d;
Elf_Scn *scn;
Elf_Shdr *shdr;
uint64_t unwinding_table_size = unwinding_size - unwinding_header_size;
/*
* setup eh_frame section
*/
scn = elf_newscn(e);
if (!scn) {
warnx("cannot create section");
return -1;
}
d = elf_newdata(scn);
if (!d) {
warnx("cannot get new data");
return -1;
}
d->d_align = 8;
d->d_off = 0LL;
d->d_buf = unwinding;
d->d_type = ELF_T_BYTE;
d->d_size = unwinding_table_size;
d->d_version = EV_CURRENT;
shdr = elf_getshdr(scn);
if (!shdr) {
warnx("cannot get section header");
return -1;
}
shdr->sh_name = 104;
shdr->sh_type = SHT_PROGBITS;
shdr->sh_addr = base_offset;
shdr->sh_flags = SHF_ALLOC;
shdr->sh_entsize = 0;
/*
* setup eh_frame_hdr section
*/
scn = elf_newscn(e);
if (!scn) {
warnx("cannot create section");
return -1;
}
d = elf_newdata(scn);
if (!d) {
warnx("cannot get new data");
return -1;
}
d->d_align = 4;
d->d_off = 0LL;
d->d_buf = unwinding + unwinding_table_size;
d->d_type = ELF_T_BYTE;
d->d_size = unwinding_header_size;
d->d_version = EV_CURRENT;
shdr = elf_getshdr(scn);
if (!shdr) {
warnx("cannot get section header");
return -1;
}
shdr->sh_name = 90;
shdr->sh_type = SHT_PROGBITS;
shdr->sh_addr = base_offset + unwinding_table_size;
shdr->sh_flags = SHF_ALLOC;
shdr->sh_entsize = 0;
return 0;
}
/*
* fd: file descriptor open for writing for the output file
* load_addr: code load address (could be zero, just used for buildid)
* sym: function name (for native code - used as the symbol)
* code: the native code
* csize: the code size in bytes
*/
int
jit_write_elf(int fd, uint64_t load_addr, const char *sym,
const void *code, int csize,
void *debug __maybe_unused, int nr_debug_entries __maybe_unused,
void *unwinding, uint64_t unwinding_header_size, uint64_t unwinding_size)
{
Elf *e;
Elf_Data *d;
Elf_Scn *scn;
Elf_Ehdr *ehdr;
Elf_Phdr *phdr;
Elf_Shdr *shdr;
uint64_t eh_frame_base_offset;
char *strsym = NULL;
int symlen;
int retval = -1;
if (elf_version(EV_CURRENT) == EV_NONE) {
warnx("ELF initialization failed");
return -1;
}
e = elf_begin(fd, ELF_C_WRITE, NULL);
if (!e) {
warnx("elf_begin failed");
goto error;
}
/*
* setup ELF header
*/
ehdr = elf_newehdr(e);
if (!ehdr) {
warnx("cannot get ehdr");
goto error;
}
ehdr->e_ident[EI_DATA] = GEN_ELF_ENDIAN;
ehdr->e_ident[EI_CLASS] = GEN_ELF_CLASS;
ehdr->e_machine = GEN_ELF_ARCH;
ehdr->e_type = ET_DYN;
ehdr->e_entry = GEN_ELF_TEXT_OFFSET;
ehdr->e_version = EV_CURRENT;
ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
/*
* setup program header
*/
phdr = elf_newphdr(e, 1);
if (!phdr) {
warnx("cannot create program header");
goto error;
}
phdr[0].p_type = PT_LOAD;
phdr[0].p_offset = 0;
phdr[0].p_vaddr = 0;
phdr[0].p_paddr = 0;
phdr[0].p_filesz = csize;
phdr[0].p_memsz = csize;
phdr[0].p_flags = PF_X | PF_R;
phdr[0].p_align = 8;
/*
* setup text section
*/
scn = elf_newscn(e);
if (!scn) {
warnx("cannot create section");
goto error;
}
d = elf_newdata(scn);
if (!d) {
warnx("cannot get new data");
goto error;
}
d->d_align = 16;
d->d_off = 0LL;
d->d_buf = (void *)code;
d->d_type = ELF_T_BYTE;
d->d_size = csize;
d->d_version = EV_CURRENT;
shdr = elf_getshdr(scn);
if (!shdr) {
warnx("cannot get section header");
goto error;
}
shdr->sh_name = 1;
shdr->sh_type = SHT_PROGBITS;
shdr->sh_addr = GEN_ELF_TEXT_OFFSET;
shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
shdr->sh_entsize = 0;
/*
* Setup .eh_frame_hdr and .eh_frame
*/
if (unwinding) {
eh_frame_base_offset = ALIGN_8(GEN_ELF_TEXT_OFFSET + csize);
retval = jit_add_eh_frame_info(e, unwinding,
unwinding_header_size, unwinding_size,
eh_frame_base_offset);
if (retval)
goto error;
retval = -1;
}
/*
* setup section headers string table
*/
scn = elf_newscn(e);
if (!scn) {
warnx("cannot create section");
goto error;
}
d = elf_newdata(scn);
if (!d) {
warnx("cannot get new data");
goto error;
}
d->d_align = 1;
d->d_off = 0LL;
d->d_buf = shd_string_table;
d->d_type = ELF_T_BYTE;
d->d_size = sizeof(shd_string_table);
d->d_version = EV_CURRENT;
shdr = elf_getshdr(scn);
if (!shdr) {
warnx("cannot get section header");
goto error;
}
shdr->sh_name = 7; /* offset of '.shstrtab' in shd_string_table */
shdr->sh_type = SHT_STRTAB;
shdr->sh_flags = 0;
shdr->sh_entsize = 0;
/*
* setup symtab section
*/
symtab[1].st_size = csize;
symtab[1].st_value = GEN_ELF_TEXT_OFFSET;
scn = elf_newscn(e);
if (!scn) {
warnx("cannot create section");
goto error;
}
d = elf_newdata(scn);
if (!d) {
warnx("cannot get new data");
goto error;
}
d->d_align = 8;
d->d_off = 0LL;
d->d_buf = symtab;
d->d_type = ELF_T_SYM;
d->d_size = sizeof(symtab);
d->d_version = EV_CURRENT;
shdr = elf_getshdr(scn);
if (!shdr) {
warnx("cannot get section header");
goto error;
}
shdr->sh_name = 17; /* offset of '.symtab' in shd_string_table */
shdr->sh_type = SHT_SYMTAB;
shdr->sh_flags = 0;
shdr->sh_entsize = sizeof(Elf_Sym);
shdr->sh_link = unwinding ? 6 : 4; /* index of .strtab section */
/*
* setup symbols string table
* 2 = 1 for the NUL of the empty 1st entry + 1 for the NUL terminating the 2nd entry's name
*/
symlen = 2 + strlen(sym);
strsym = calloc(1, symlen);
if (!strsym) {
warnx("cannot allocate strsym");
goto error;
}
strcpy(strsym + 1, sym);
scn = elf_newscn(e);
if (!scn) {
warnx("cannot create section");
goto error;
}
d = elf_newdata(scn);
if (!d) {
warnx("cannot get new data");
goto error;
}
d->d_align = 1;
d->d_off = 0LL;
d->d_buf = strsym;
d->d_type = ELF_T_BYTE;
d->d_size = symlen;
d->d_version = EV_CURRENT;
shdr = elf_getshdr(scn);
if (!shdr) {
warnx("cannot get section header");
goto error;
}
shdr->sh_name = 25; /* offset in shd_string_table */
shdr->sh_type = SHT_STRTAB;
shdr->sh_flags = 0;
shdr->sh_entsize = 0;
/*
* setup build-id section
*/
scn = elf_newscn(e);
if (!scn) {
warnx("cannot create section");
goto error;
}
d = elf_newdata(scn);
if (!d) {
warnx("cannot get new data");
goto error;
}
/*
* build-id generation
*/
gen_build_id(&bnote, load_addr, code, csize);
bnote.desc.namesz = sizeof(bnote.name); /* must include 0 termination */
bnote.desc.descsz = sizeof(bnote.build_id);
bnote.desc.type = NT_GNU_BUILD_ID;
strcpy(bnote.name, "GNU");
d->d_align = 4;
d->d_off = 0LL;
d->d_buf = &bnote;
d->d_type = ELF_T_BYTE;
d->d_size = sizeof(bnote);
d->d_version = EV_CURRENT;
shdr = elf_getshdr(scn);
if (!shdr) {
warnx("cannot get section header");
goto error;
}
shdr->sh_name = 33; /* offset in shd_string_table */
shdr->sh_type = SHT_NOTE;
shdr->sh_addr = 0x0;
shdr->sh_flags = SHF_ALLOC;
shdr->sh_size = sizeof(bnote);
shdr->sh_entsize = 0;
#ifdef HAVE_DWARF_SUPPORT
if (debug && nr_debug_entries) {
retval = jit_add_debug_info(e, load_addr, debug, nr_debug_entries);
if (retval)
goto error;
} else
#endif
{
if (elf_update(e, ELF_C_WRITE) < 0) {
warnx("elf_update 4 failed");
goto error;
}
}
retval = 0;
error:
(void)elf_end(e);
free(strsym);
return retval;
}
| linux-master | tools/perf/util/genelf.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "util/cgroup.h"
#include "util/data.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/memswap.h"
#include "util/namespaces.h"
#include "util/session.h"
#include "util/stat.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <asm/bug.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <internal/lib.h> // page_size
#include <internal/threadmap.h>
#include <perf/threadmap.h>
#include <symbol/kallsyms.h>
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <api/io.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
int perf_tool__process_synth_event(struct perf_tool *tool,
union perf_event *event,
struct machine *machine,
perf_event__handler_t process)
{
struct perf_sample synth_sample = {
.pid = -1,
.tid = -1,
.time = -1,
.stream_id = -1,
.cpu = -1,
.period = 1,
.cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
};
return process(tool, event, &synth_sample, machine);
};
/*
* Assumes that the first 4095 bytes of /proc/pid/stat contains
* the comm, tgid and ppid.
*/
static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
pid_t *tgid, pid_t *ppid, bool *kernel)
{
char bf[4096];
int fd;
size_t size = 0;
ssize_t n;
char *name, *tgids, *ppids, *vmpeak, *threads;
*tgid = -1;
*ppid = -1;
if (pid)
snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
else
snprintf(bf, sizeof(bf), "/proc/%d/status", tid);
fd = open(bf, O_RDONLY);
if (fd < 0) {
pr_debug("couldn't open %s\n", bf);
return -1;
}
n = read(fd, bf, sizeof(bf) - 1);
close(fd);
if (n <= 0) {
pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
tid);
return -1;
}
bf[n] = '\0';
name = strstr(bf, "Name:");
tgids = strstr(name ?: bf, "Tgid:");
ppids = strstr(tgids ?: bf, "PPid:");
vmpeak = strstr(ppids ?: bf, "VmPeak:");
if (vmpeak)
threads = NULL;
else
threads = strstr(ppids ?: bf, "Threads:");
if (name) {
char *nl;
name = skip_spaces(name + 5); /* strlen("Name:") */
nl = strchr(name, '\n');
if (nl)
*nl = '\0';
size = strlen(name);
if (size >= len)
size = len - 1;
memcpy(comm, name, size);
comm[size] = '\0';
} else {
pr_debug("Name: string not found for pid %d\n", tid);
}
if (tgids) {
tgids += 5; /* strlen("Tgid:") */
*tgid = atoi(tgids);
} else {
pr_debug("Tgid: string not found for pid %d\n", tid);
}
if (ppids) {
ppids += 5; /* strlen("PPid:") */
*ppid = atoi(ppids);
} else {
pr_debug("PPid: string not found for pid %d\n", tid);
}
if (!vmpeak && threads)
*kernel = true;
else
*kernel = false;
return 0;
}
static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
struct machine *machine,
pid_t *tgid, pid_t *ppid, bool *kernel)
{
size_t size;
*ppid = -1;
memset(&event->comm, 0, sizeof(event->comm));
if (machine__is_host(machine)) {
if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
sizeof(event->comm.comm),
tgid, ppid, kernel) != 0) {
return -1;
}
} else {
*tgid = machine->pid;
}
if (*tgid < 0)
return -1;
event->comm.pid = *tgid;
event->comm.header.type = PERF_RECORD_COMM;
size = strlen(event->comm.comm) + 1;
size = PERF_ALIGN(size, sizeof(u64));
memset(event->comm.comm + size, 0, machine->id_hdr_size);
event->comm.header.size = (sizeof(event->comm) -
(sizeof(event->comm.comm) - size) +
machine->id_hdr_size);
event->comm.tid = tid;
return 0;
}
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
union perf_event *event, pid_t pid,
perf_event__handler_t process,
struct machine *machine)
{
pid_t tgid, ppid;
bool kernel_thread;
if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
&kernel_thread) != 0)
return -1;
if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
return -1;
return tgid;
}
static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
struct perf_ns_link_info *ns_link_info)
{
struct stat64 st;
char proc_ns[128];
sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
if (stat64(proc_ns, &st) == 0) {
ns_link_info->dev = st.st_dev;
ns_link_info->ino = st.st_ino;
}
}
int perf_event__synthesize_namespaces(struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid,
perf_event__handler_t process,
struct machine *machine)
{
u32 idx;
struct perf_ns_link_info *ns_link_info;
if (!tool || !tool->namespace_events)
return 0;
memset(&event->namespaces, 0, (sizeof(event->namespaces) +
(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
machine->id_hdr_size));
event->namespaces.pid = tgid;
event->namespaces.tid = pid;
event->namespaces.nr_namespaces = NR_NAMESPACES;
ns_link_info = event->namespaces.link_info;
for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
perf_event__get_ns_link_info(pid, perf_ns__name(idx),
&ns_link_info[idx]);
event->namespaces.header.type = PERF_RECORD_NAMESPACES;
event->namespaces.header.size = (sizeof(event->namespaces) +
(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
machine->id_hdr_size);
if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
return -1;
return 0;
}
static int perf_event__synthesize_fork(struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid, pid_t ppid,
perf_event__handler_t process,
struct machine *machine)
{
memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
/*
* for main thread set parent to ppid from status file. For other
* threads set parent pid to main thread. i.e., assume main thread
* spawns all threads in a process
*/
if (tgid == pid) {
event->fork.ppid = ppid;
event->fork.ptid = ppid;
} else {
event->fork.ppid = tgid;
event->fork.ptid = tgid;
}
event->fork.pid = tgid;
event->fork.tid = pid;
event->fork.header.type = PERF_RECORD_FORK;
event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
return -1;
return 0;
}
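/*
* Parse one line of /proc/<pid>/maps into its start/end addresses,
* protection and flags bits, file offset, device major/minor, inode and
* pathname. Returns false on any malformed field.
*/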
static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
u32 *prot, u32 *flags, __u64 *offset,
u32 *maj, u32 *min,
__u64 *inode,
ssize_t pathname_size, char *pathname)
{
__u64 temp;
int ch;
char *start_pathname = pathname;
if (io__get_hex(io, start) != '-')
return false;
if (io__get_hex(io, end) != ' ')
return false;
/* map protection and flags bits */
*prot = 0;
ch = io__get_char(io);
if (ch == 'r')
*prot |= PROT_READ;
else if (ch != '-')
return false;
ch = io__get_char(io);
if (ch == 'w')
*prot |= PROT_WRITE;
else if (ch != '-')
return false;
ch = io__get_char(io);
if (ch == 'x')
*prot |= PROT_EXEC;
else if (ch != '-')
return false;
ch = io__get_char(io);
if (ch == 's')
*flags = MAP_SHARED;
else if (ch == 'p')
*flags = MAP_PRIVATE;
else
return false;
if (io__get_char(io) != ' ')
return false;
if (io__get_hex(io, offset) != ' ')
return false;
if (io__get_hex(io, &temp) != ':')
return false;
*maj = temp;
if (io__get_hex(io, &temp) != ' ')
return false;
*min = temp;
ch = io__get_dec(io, inode);
if (ch != ' ') {
*pathname = '\0';
return ch == '\n';
}
do {
ch = io__get_char(io);
} while (ch == ' ');
while (true) {
if (ch < 0)
return false;
if (ch == '\0' || ch == '\n' ||
(pathname + 1 - start_pathname) >= pathname_size) {
*pathname = '\0';
return true;
}
*pathname++ = ch;
ch = io__get_char(io);
}
}
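/*
* Fill in the build-id of a synthesized MMAP2 event: for the kernel it
* comes from /sys/kernel/notes, otherwise from the DSO cache or by
* reading the mapped file inside the target's mount namespace.
*/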
static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
struct machine *machine,
bool is_kernel)
{
struct build_id bid;
struct nsinfo *nsi;
struct nscookie nc;
struct dso *dso = NULL;
struct dso_id id;
int rc;
if (is_kernel) {
rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
goto out;
}
id.maj = event->maj;
id.min = event->min;
id.ino = event->ino;
id.ino_generation = event->ino_generation;
dso = dsos__findnew_id(&machine->dsos, event->filename, &id);
if (dso && dso->has_build_id) {
bid = dso->bid;
rc = 0;
goto out;
}
nsi = nsinfo__new(event->pid);
nsinfo__mountns_enter(nsi, &nc);
rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
nsinfo__mountns_exit(&nc);
nsinfo__put(nsi);
out:
if (rc == 0) {
memcpy(event->build_id, bid.data, sizeof(bid.data));
event->build_id_size = (u8) bid.size;
event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
event->__reserved_1 = 0;
event->__reserved_2 = 0;
if (dso && !dso->has_build_id)
dso__set_build_id(dso, &bid);
} else {
if (event->filename[0] == '/') {
pr_debug2("Failed to read build ID for %s\n",
event->filename);
}
}
dso__put(dso);
}
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid,
perf_event__handler_t process,
struct machine *machine,
bool mmap_data)
{
unsigned long long t;
char bf[BUFSIZ];
struct io io;
bool truncation = false;
unsigned long long timeout = proc_map_timeout * 1000000ULL;
int rc = 0;
const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
if (machine__is_default_guest(machine))
return 0;
snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
machine->root_dir, pid, pid);
io.fd = open(bf, O_RDONLY, 0);
if (io.fd < 0) {
/*
* We raced with a task exiting - just return:
*/
pr_debug("couldn't open %s\n", bf);
return -1;
}
io__init(&io, io.fd, bf, sizeof(bf));
event->header.type = PERF_RECORD_MMAP2;
t = rdclock();
while (!io.eof) {
static const char anonstr[] = "//anon";
size_t size, aligned_size;
/* ensure null termination since stack will be reused. */
event->mmap2.filename[0] = '\0';
/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
if (!read_proc_maps_line(&io,
&event->mmap2.start,
&event->mmap2.len,
&event->mmap2.prot,
&event->mmap2.flags,
&event->mmap2.pgoff,
&event->mmap2.maj,
&event->mmap2.min,
&event->mmap2.ino,
sizeof(event->mmap2.filename),
event->mmap2.filename))
continue;
if ((rdclock() - t) > timeout) {
pr_warning("Reading %s/proc/%d/task/%d/maps time out. "
"You may want to increase "
"the time limit by --proc-map-timeout\n",
machine->root_dir, pid, pid);
truncation = true;
goto out;
}
event->mmap2.ino_generation = 0;
/*
* Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
*/
if (machine__is_host(machine))
event->header.misc = PERF_RECORD_MISC_USER;
else
event->header.misc = PERF_RECORD_MISC_GUEST_USER;
if ((event->mmap2.prot & PROT_EXEC) == 0) {
if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
continue;
event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
}
out:
if (truncation)
event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
if (!strcmp(event->mmap2.filename, ""))
strcpy(event->mmap2.filename, anonstr);
if (hugetlbfs_mnt_len &&
!strncmp(event->mmap2.filename, hugetlbfs_mnt,
hugetlbfs_mnt_len)) {
strcpy(event->mmap2.filename, anonstr);
event->mmap2.flags |= MAP_HUGETLB;
}
size = strlen(event->mmap2.filename) + 1;
aligned_size = PERF_ALIGN(size, sizeof(u64));
event->mmap2.len -= event->mmap2.start;
event->mmap2.header.size = (sizeof(event->mmap2) -
(sizeof(event->mmap2.filename) - aligned_size));
memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
(aligned_size - size));
event->mmap2.header.size += machine->id_hdr_size;
event->mmap2.pid = tgid;
event->mmap2.tid = pid;
if (symbol_conf.buildid_mmap2)
perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
rc = -1;
break;
}
if (truncation)
break;
}
close(io.fd);
return rc;
}
#ifdef HAVE_FILE_HANDLE
static int perf_event__synthesize_cgroup(struct perf_tool *tool,
union perf_event *event,
char *path, size_t mount_len,
perf_event__handler_t process,
struct machine *machine)
{
size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
size_t path_len = strlen(path) - mount_len + 1;
struct {
struct file_handle fh;
uint64_t cgroup_id;
} handle;
int mount_id;
while (path_len % sizeof(u64))
path[mount_len + path_len++] = '\0';
memset(&event->cgroup, 0, event_size);
event->cgroup.header.type = PERF_RECORD_CGROUP;
event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
handle.fh.handle_bytes = sizeof(handle.cgroup_id);
if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
pr_debug("stat failed: %s\n", path);
return -1;
}
event->cgroup.id = handle.cgroup_id;
strncpy(event->cgroup.path, path + mount_len, path_len);
memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
pr_debug("process synth event failed\n");
return -1;
}
return 0;
}
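/*
* Recursively walk the cgroup hierarchy under path, synthesizing one
* PERF_RECORD_CGROUP event per directory.
*/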
static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
union perf_event *event,
char *path, size_t mount_len,
perf_event__handler_t process,
struct machine *machine)
{
size_t pos = strlen(path);
DIR *d;
struct dirent *dent;
int ret = 0;
if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
process, machine) < 0)
return -1;
d = opendir(path);
if (d == NULL) {
pr_debug("failed to open directory: %s\n", path);
return -1;
}
while ((dent = readdir(d)) != NULL) {
if (dent->d_type != DT_DIR)
continue;
if (!strcmp(dent->d_name, ".") ||
!strcmp(dent->d_name, ".."))
continue;
/* any sane path should be less than PATH_MAX */
if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
continue;
if (path[pos - 1] != '/')
strcat(path, "/");
strcat(path, dent->d_name);
ret = perf_event__walk_cgroup_tree(tool, event, path,
mount_len, process, machine);
if (ret < 0)
break;
path[pos] = '\0';
}
closedir(d);
return ret;
}
int perf_event__synthesize_cgroups(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
union perf_event event;
char cgrp_root[PATH_MAX];
size_t mount_len; /* length of mount point in the path */
if (!tool || !tool->cgroup_events)
return 0;
if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
pr_debug("cannot find cgroup mount point\n");
return -1;
}
mount_len = strlen(cgrp_root);
/* make sure the path starts with a slash (after mount point) */
strcat(cgrp_root, "/");
if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
process, machine) < 0)
return -1;
return 0;
}
#else
int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
perf_event__handler_t process __maybe_unused,
struct machine *machine __maybe_unused)
{
return -1;
}
#endif
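/*
* Synthesize one MMAP/MMAP2 event per loaded kernel module, taken from
* the machine's kernel maps.
*/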
int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
struct machine *machine)
{
int rc = 0;
struct map_rb_node *pos;
struct maps *maps = machine__kernel_maps(machine);
union perf_event *event;
size_t size = symbol_conf.buildid_mmap2 ?
sizeof(event->mmap2) : sizeof(event->mmap);
event = zalloc(size + machine->id_hdr_size);
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
return -1;
}
/*
* kernel uses 0 for user space maps, see kernel/perf_event.c
* __perf_event_mmap
*/
if (machine__is_host(machine))
event->header.misc = PERF_RECORD_MISC_KERNEL;
else
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
maps__for_each_entry(maps, pos) {
struct map *map = pos->map;
struct dso *dso;
if (!__map__is_kmodule(map))
continue;
dso = map__dso(map);
if (symbol_conf.buildid_mmap2) {
size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
event->mmap2.header.type = PERF_RECORD_MMAP2;
event->mmap2.header.size = (sizeof(event->mmap2) -
(sizeof(event->mmap2.filename) - size));
memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
event->mmap2.header.size += machine->id_hdr_size;
event->mmap2.start = map__start(map);
event->mmap2.len = map__size(map);
event->mmap2.pid = machine->pid;
memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1);
perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
} else {
size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size));
memset(event->mmap.filename + size, 0, machine->id_hdr_size);
event->mmap.header.size += machine->id_hdr_size;
event->mmap.start = map__start(map);
event->mmap.len = map__size(map);
event->mmap.pid = machine->pid;
memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1);
}
if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
rc = -1;
break;
}
}
free(event);
return rc;
}
static int filter_task(const struct dirent *dirent)
{
return isdigit(dirent->d_name[0]);
}
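/*
* Synthesize COMM/FORK/NAMESPACES (and optionally MMAP2) events for one
* task. With full set, walk /proc/<pid>/task and emit events for every
* thread in the group; otherwise only the passed-in pid is handled.
*/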
static int __event__synthesize_thread(union perf_event *comm_event,
union perf_event *mmap_event,
union perf_event *fork_event,
union perf_event *namespaces_event,
pid_t pid, int full, perf_event__handler_t process,
struct perf_tool *tool, struct machine *machine,
bool needs_mmap, bool mmap_data)
{
char filename[PATH_MAX];
struct dirent **dirent;
pid_t tgid, ppid;
int rc = 0;
int i, n;
/* special case: only send one comm event using passed in pid */
if (!full) {
tgid = perf_event__synthesize_comm(tool, comm_event, pid,
process, machine);
if (tgid == -1)
return -1;
if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
tgid, process, machine) < 0)
return -1;
/*
* send mmap only for thread group leader
* see thread__init_maps()
*/
if (pid == tgid && needs_mmap &&
perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
process, machine, mmap_data))
return -1;
return 0;
}
if (machine__is_default_guest(machine))
return 0;
snprintf(filename, sizeof(filename), "%s/proc/%d/task",
machine->root_dir, pid);
n = scandir(filename, &dirent, filter_task, NULL);
if (n < 0)
return n;
for (i = 0; i < n; i++) {
char *end;
pid_t _pid;
bool kernel_thread = false;
_pid = strtol(dirent[i]->d_name, &end, 10);
if (*end)
continue;
/* some threads may exit just after scan, ignore it */
if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
&tgid, &ppid, &kernel_thread) != 0)
continue;
rc = -1;
if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
ppid, process, machine) < 0)
break;
if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
tgid, process, machine) < 0)
break;
/*
* Send the prepared comm event
*/
if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
break;
rc = 0;
if (_pid == pid && !kernel_thread && needs_mmap) {
/* process the parent's maps too */
rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
process, machine, mmap_data);
if (rc)
break;
}
}
for (i = 0; i < n; i++)
zfree(&dirent[i]);
free(dirent);
return rc;
}
int perf_event__synthesize_thread_map(struct perf_tool *tool,
struct perf_thread_map *threads,
perf_event__handler_t process,
struct machine *machine,
bool needs_mmap, bool mmap_data)
{
union perf_event *comm_event, *mmap_event, *fork_event;
union perf_event *namespaces_event;
int err = -1, thread, j;
comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
if (comm_event == NULL)
goto out;
mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
if (mmap_event == NULL)
goto out_free_comm;
fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
if (fork_event == NULL)
goto out_free_mmap;
namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
machine->id_hdr_size);
if (namespaces_event == NULL)
goto out_free_fork;
err = 0;
for (thread = 0; thread < threads->nr; ++thread) {
if (__event__synthesize_thread(comm_event, mmap_event,
fork_event, namespaces_event,
perf_thread_map__pid(threads, thread), 0,
process, tool, machine,
needs_mmap, mmap_data)) {
err = -1;
break;
}
/*
* comm.pid is set to thread group id by
* perf_event__synthesize_comm
*/
if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
bool need_leader = true;
/* is thread group leader in thread_map? */
for (j = 0; j < threads->nr; ++j) {
if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
need_leader = false;
break;
}
}
/* if not, generate events for it */
if (need_leader &&
__event__synthesize_thread(comm_event, mmap_event,
fork_event, namespaces_event,
comm_event->comm.pid, 0,
process, tool, machine,
needs_mmap, mmap_data)) {
err = -1;
break;
}
}
}
free(namespaces_event);
out_free_fork:
free(fork_event);
out_free_mmap:
free(mmap_event);
out_free_comm:
free(comm_event);
out:
return err;
}
static int __perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine,
bool needs_mmap,
bool mmap_data,
struct dirent **dirent,
int start,
int num)
{
union perf_event *comm_event, *mmap_event, *fork_event;
union perf_event *namespaces_event;
int err = -1;
char *end;
pid_t pid;
int i;
comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
if (comm_event == NULL)
goto out;
mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
if (mmap_event == NULL)
goto out_free_comm;
fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
if (fork_event == NULL)
goto out_free_mmap;
namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
machine->id_hdr_size);
if (namespaces_event == NULL)
goto out_free_fork;
for (i = start; i < start + num; i++) {
if (!isdigit(dirent[i]->d_name[0]))
continue;
pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
/* only interested in proper numerical dirents */
if (*end)
continue;
/*
* We may race with exiting thread, so don't stop just because
* one thread couldn't be synthesized.
*/
__event__synthesize_thread(comm_event, mmap_event, fork_event,
namespaces_event, pid, 1, process,
tool, machine, needs_mmap, mmap_data);
}
err = 0;
free(namespaces_event);
out_free_fork:
free(fork_event);
out_free_mmap:
free(mmap_event);
out_free_comm:
free(comm_event);
out:
return err;
}
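/*
* Worker context for parallel thread synthesis: each worker handles the
* [start, start + num) slice of the /proc dirent array.
*/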
struct synthesize_threads_arg {
struct perf_tool *tool;
perf_event__handler_t process;
struct machine *machine;
bool needs_mmap;
bool mmap_data;
struct dirent **dirent;
int num;
int start;
};
static void *synthesize_threads_worker(void *arg)
{
struct synthesize_threads_arg *args = arg;
__perf_event__synthesize_threads(args->tool, args->process,
args->machine,
args->needs_mmap, args->mmap_data,
args->dirent,
args->start, args->num);
return NULL;
}
int perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine,
bool needs_mmap, bool mmap_data,
unsigned int nr_threads_synthesize)
{
struct synthesize_threads_arg *args = NULL;
pthread_t *synthesize_threads = NULL;
char proc_path[PATH_MAX];
struct dirent **dirent;
int num_per_thread;
int m, n, i, j;
int thread_nr;
int base = 0;
int err = -1;
if (machine__is_default_guest(machine))
return 0;
snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
n = scandir(proc_path, &dirent, filter_task, NULL);
if (n < 0)
return err;
if (nr_threads_synthesize == UINT_MAX)
thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
else
thread_nr = nr_threads_synthesize;
if (thread_nr <= 1) {
err = __perf_event__synthesize_threads(tool, process,
machine,
needs_mmap, mmap_data,
dirent, base, n);
goto free_dirent;
}
if (thread_nr > n)
thread_nr = n;
synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
if (synthesize_threads == NULL)
goto free_dirent;
args = calloc(thread_nr, sizeof(*args));
if (args == NULL)
goto free_threads;
num_per_thread = n / thread_nr;
m = n % thread_nr;
for (i = 0; i < thread_nr; i++) {
args[i].tool = tool;
args[i].process = process;
args[i].machine = machine;
args[i].needs_mmap = needs_mmap;
args[i].mmap_data = mmap_data;
args[i].dirent = dirent;
}
for (i = 0; i < m; i++) {
args[i].num = num_per_thread + 1;
args[i].start = i * args[i].num;
}
if (i != 0)
base = args[i-1].start + args[i-1].num;
for (j = i; j < thread_nr; j++) {
args[j].num = num_per_thread;
args[j].start = base + (j - i) * args[i].num;
}
for (i = 0; i < thread_nr; i++) {
if (pthread_create(&synthesize_threads[i], NULL,
synthesize_threads_worker, &args[i]))
goto out_join;
}
err = 0;
out_join:
for (i = 0; i < thread_nr; i++)
pthread_join(synthesize_threads[i], NULL);
free(args);
free_threads:
free(synthesize_threads);
free_dirent:
for (i = 0; i < n; i++)
zfree(&dirent[i]);
free(dirent);
return err;
}
int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
perf_event__handler_t process __maybe_unused,
struct machine *machine __maybe_unused)
{
return 0;
}
static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
union perf_event *event;
size_t size = symbol_conf.buildid_mmap2 ?
sizeof(event->mmap2) : sizeof(event->mmap);
struct map *map = machine__kernel_map(machine);
struct kmap *kmap;
int err;
if (map == NULL)
return -1;
kmap = map__kmap(map);
if (!kmap->ref_reloc_sym)
return -1;
/*
* We should get this from /sys/kernel/sections/.text, but until that is
* available use this, and once it is available keep this as a fallback
* for older kernels.
*/
event = zalloc(size + machine->id_hdr_size);
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
return -1;
}
if (machine__is_host(machine)) {
/*
* kernel uses PERF_RECORD_MISC_USER for user space maps,
* see kernel/perf_event.c __perf_event_mmap
*/
event->header.misc = PERF_RECORD_MISC_KERNEL;
} else {
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
}
if (symbol_conf.buildid_mmap2) {
size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
size = PERF_ALIGN(size, sizeof(u64));
event->mmap2.header.type = PERF_RECORD_MMAP2;
event->mmap2.header.size = (sizeof(event->mmap2) -
(sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
event->mmap2.start = map__start(map);
event->mmap2.len = map__end(map) - event->mmap2.start;
event->mmap2.pid = machine->pid;
perf_record_mmap2__read_build_id(&event->mmap2, machine, true);
} else {
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
size = PERF_ALIGN(size, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
event->mmap.pgoff = kmap->ref_reloc_sym->addr;
event->mmap.start = map__start(map);
event->mmap.len = map__end(map) - event->mmap.start;
event->mmap.pid = machine->pid;
}
err = perf_tool__process_synth_event(tool, event, machine, process);
free(event);
return err;
}
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
int err;
err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
if (err < 0)
return err;
return perf_event__synthesize_extra_kmaps(tool, process, machine);
}
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
struct perf_thread_map *threads,
perf_event__handler_t process,
struct machine *machine)
{
union perf_event *event;
int i, err, size;
size = sizeof(event->thread_map);
size += threads->nr * sizeof(event->thread_map.entries[0]);
event = zalloc(size);
if (!event)
return -ENOMEM;
event->header.type = PERF_RECORD_THREAD_MAP;
event->header.size = size;
event->thread_map.nr = threads->nr;
for (i = 0; i < threads->nr; i++) {
struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
char *comm = perf_thread_map__comm(threads, i);
if (!comm)
comm = (char *) "";
entry->pid = perf_thread_map__pid(threads, i);
strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
}
err = process(tool, event, NULL, machine);
free(event);
return err;
}
struct synthesize_cpu_map_data {
const struct perf_cpu_map *map;
int nr;
int min_cpu;
int max_cpu;
int has_any_cpu;
int type;
size_t size;
struct perf_record_cpu_map_data *data;
};
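/*
* A CPU map can be encoded three ways: an explicit CPU list, a bitmask,
* or a [start, end] range; cpu_map_data__alloc() below picks a compact
* representation for the map being synthesized.
*/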
static void synthesize_cpus(struct synthesize_cpu_map_data *data)
{
data->data->type = PERF_CPU_MAP__CPUS;
data->data->cpus_data.nr = data->nr;
for (int i = 0; i < data->nr; i++)
data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu;
}
static void synthesize_mask(struct synthesize_cpu_map_data *data)
{
int idx;
struct perf_cpu cpu;
/* Due to padding, the 4bytes per entry mask variant is always smaller. */
data->data->type = PERF_CPU_MAP__MASK;
data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu);
data->data->mask32_data.long_size = 4;
perf_cpu_map__for_each_cpu(cpu, idx, data->map) {
int bit_word = cpu.cpu / 32;
u32 bit_mask = 1U << (cpu.cpu & 31);
data->data->mask32_data.mask[bit_word] |= bit_mask;
}
}
static void synthesize_range_cpus(struct synthesize_cpu_map_data *data)
{
data->data->type = PERF_CPU_MAP__RANGE_CPUS;
data->data->range_cpu_data.any_cpu = data->has_any_cpu;
data->data->range_cpu_data.start_cpu = data->min_cpu;
data->data->range_cpu_data.end_cpu = data->max_cpu;
}
static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data,
size_t header_size)
{
size_t size_cpus, size_mask;
syn_data->nr = perf_cpu_map__nr(syn_data->map);
syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0;
syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu;
syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu;
if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) {
/* A consecutive range of CPUs can be encoded using a range. */
assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64));
syn_data->type = PERF_CPU_MAP__RANGE_CPUS;
syn_data->size = header_size + sizeof(u64);
return zalloc(syn_data->size);
}
size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16);
/* Due to padding, the 4bytes per entry mask variant is always smaller. */
size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) +
BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32);
if (syn_data->has_any_cpu || size_cpus < size_mask) {
/* Follow the CPU map encoding. */
syn_data->type = PERF_CPU_MAP__CPUS;
syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64));
return zalloc(syn_data->size);
}
/* Encode using a bitmask. */
syn_data->type = PERF_CPU_MAP__MASK;
syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64));
return zalloc(syn_data->size);
}
static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data)
{
switch (data->type) {
case PERF_CPU_MAP__CPUS:
synthesize_cpus(data);
break;
case PERF_CPU_MAP__MASK:
synthesize_mask(data);
break;
case PERF_CPU_MAP__RANGE_CPUS:
synthesize_range_cpus(data);
break;
default:
break;
}
}
static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
{
struct synthesize_cpu_map_data syn_data = { .map = map };
struct perf_record_cpu_map *event;
event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header));
if (!event)
return NULL;
syn_data.data = &event->data;
event->header.type = PERF_RECORD_CPU_MAP;
event->header.size = syn_data.size;
cpu_map_data__synthesize(&syn_data);
return event;
}
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
const struct perf_cpu_map *map,
perf_event__handler_t process,
struct machine *machine)
{
struct perf_record_cpu_map *event;
int err;
event = cpu_map_event__new(map);
if (!event)
return -ENOMEM;
err = process(tool, (union perf_event *) event, NULL, machine);
free(event);
return err;
}
int perf_event__synthesize_stat_config(struct perf_tool *tool,
struct perf_stat_config *config,
perf_event__handler_t process,
struct machine *machine)
{
struct perf_record_stat_config *event;
int size, i = 0, err;
size = sizeof(*event);
size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
event = zalloc(size);
if (!event)
return -ENOMEM;
event->header.type = PERF_RECORD_STAT_CONFIG;
event->header.size = size;
event->nr = PERF_STAT_CONFIG_TERM__MAX;
#define ADD(__term, __val) \
event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \
event->data[i].val = __val; \
i++;
ADD(AGGR_MODE, config->aggr_mode)
ADD(INTERVAL, config->interval)
ADD(SCALE, config->scale)
ADD(AGGR_LEVEL, config->aggr_level)
WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
"stat config terms unbalanced\n");
#undef ADD
err = process(tool, (union perf_event *) event, NULL, machine);
free(event);
return err;
}
int perf_event__synthesize_stat(struct perf_tool *tool,
struct perf_cpu cpu, u32 thread, u64 id,
struct perf_counts_values *count,
perf_event__handler_t process,
struct machine *machine)
{
struct perf_record_stat event;
event.header.type = PERF_RECORD_STAT;
event.header.size = sizeof(event);
event.header.misc = 0;
event.id = id;
event.cpu = cpu.cpu;
event.thread = thread;
event.val = count->val;
event.ena = count->ena;
event.run = count->run;
return process(tool, (union perf_event *) &event, NULL, machine);
}
int perf_event__synthesize_stat_round(struct perf_tool *tool,
u64 evtime, u64 type,
perf_event__handler_t process,
struct machine *machine)
{
struct perf_record_stat_round event;
event.header.type = PERF_RECORD_STAT_ROUND;
event.header.size = sizeof(event);
event.header.misc = 0;
event.time = evtime;
event.type = type;
return process(tool, (union perf_event *) &event, NULL, machine);
}
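/*
* Compute the number of bytes a synthesized PERF_RECORD_SAMPLE will
* occupy for the given sample_type and read_format bits; must stay in
* sync with perf_event__synthesize_sample() below.
*/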
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
{
size_t sz, result = sizeof(struct perf_record_sample);
if (type & PERF_SAMPLE_IDENTIFIER)
result += sizeof(u64);
if (type & PERF_SAMPLE_IP)
result += sizeof(u64);
if (type & PERF_SAMPLE_TID)
result += sizeof(u64);
if (type & PERF_SAMPLE_TIME)
result += sizeof(u64);
if (type & PERF_SAMPLE_ADDR)
result += sizeof(u64);
if (type & PERF_SAMPLE_ID)
result += sizeof(u64);
if (type & PERF_SAMPLE_STREAM_ID)
result += sizeof(u64);
if (type & PERF_SAMPLE_CPU)
result += sizeof(u64);
if (type & PERF_SAMPLE_PERIOD)
result += sizeof(u64);
if (type & PERF_SAMPLE_READ) {
result += sizeof(u64);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
result += sizeof(u64);
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
result += sizeof(u64);
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
sz = sample_read_value_size(read_format);
result += sz * sample->read.group.nr;
} else {
result += sizeof(u64);
if (read_format & PERF_FORMAT_LOST)
result += sizeof(u64);
}
}
if (type & PERF_SAMPLE_CALLCHAIN) {
sz = (sample->callchain->nr + 1) * sizeof(u64);
result += sz;
}
if (type & PERF_SAMPLE_RAW) {
result += sizeof(u32);
result += sample->raw_size;
}
if (type & PERF_SAMPLE_BRANCH_STACK) {
sz = sample->branch_stack->nr * sizeof(struct branch_entry);
/* nr, hw_idx */
sz += 2 * sizeof(u64);
result += sz;
}
if (type & PERF_SAMPLE_REGS_USER) {
if (sample->user_regs.abi) {
result += sizeof(u64);
sz = hweight64(sample->user_regs.mask) * sizeof(u64);
result += sz;
} else {
result += sizeof(u64);
}
}
if (type & PERF_SAMPLE_STACK_USER) {
sz = sample->user_stack.size;
result += sizeof(u64);
if (sz) {
result += sz;
result += sizeof(u64);
}
}
if (type & PERF_SAMPLE_WEIGHT_TYPE)
result += sizeof(u64);
if (type & PERF_SAMPLE_DATA_SRC)
result += sizeof(u64);
if (type & PERF_SAMPLE_TRANSACTION)
result += sizeof(u64);
if (type & PERF_SAMPLE_REGS_INTR) {
if (sample->intr_regs.abi) {
result += sizeof(u64);
sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
result += sz;
} else {
result += sizeof(u64);
}
}
if (type & PERF_SAMPLE_PHYS_ADDR)
result += sizeof(u64);
if (type & PERF_SAMPLE_CGROUP)
result += sizeof(u64);
if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
result += sizeof(u64);
if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
result += sizeof(u64);
if (type & PERF_SAMPLE_AUX) {
result += sizeof(u64);
result += sample->aux_sample.size;
}
return result;
}
void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
__u64 *array, u64 type __maybe_unused)
{
*array = data->weight;
}
static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
const struct perf_sample *sample)
{
size_t sz = sample_read_value_size(read_format);
struct sample_read_value *v = sample->read.group.values;
sample_read_group__for_each(v, sample->read.group.nr, read_format) {
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
memcpy(array, v, sz);
array = (void *)array + sz;
}
return array;
}
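/*
* Serialize a struct perf_sample back into the on-disk/mmap record
* layout, writing the fields in the same order the kernel emits them for
* the given sample_type and read_format.
*/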
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
const struct perf_sample *sample)
{
__u64 *array;
size_t sz;
/*
* used for cross-endian analysis. See git commit 65014ab3
* for why this goofiness is needed.
*/
union u64_swap u;
array = event->sample.array;
if (type & PERF_SAMPLE_IDENTIFIER) {
*array = sample->id;
array++;
}
if (type & PERF_SAMPLE_IP) {
*array = sample->ip;
array++;
}
if (type & PERF_SAMPLE_TID) {
u.val32[0] = sample->pid;
u.val32[1] = sample->tid;
*array = u.val64;
array++;
}
if (type & PERF_SAMPLE_TIME) {
*array = sample->time;
array++;
}
if (type & PERF_SAMPLE_ADDR) {
*array = sample->addr;
array++;
}
if (type & PERF_SAMPLE_ID) {
*array = sample->id;
array++;
}
if (type & PERF_SAMPLE_STREAM_ID) {
*array = sample->stream_id;
array++;
}
if (type & PERF_SAMPLE_CPU) {
u.val32[0] = sample->cpu;
u.val32[1] = 0;
*array = u.val64;
array++;
}
if (type & PERF_SAMPLE_PERIOD) {
*array = sample->period;
array++;
}
if (type & PERF_SAMPLE_READ) {
if (read_format & PERF_FORMAT_GROUP)
*array = sample->read.group.nr;
else
*array = sample->read.one.value;
array++;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
*array = sample->read.time_enabled;
array++;
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
*array = sample->read.time_running;
array++;
}
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
array = copy_read_group_values(array, read_format,
sample);
} else {
*array = sample->read.one.id;
array++;
if (read_format & PERF_FORMAT_LOST) {
*array = sample->read.one.lost;
array++;
}
}
}
if (type & PERF_SAMPLE_CALLCHAIN) {
sz = (sample->callchain->nr + 1) * sizeof(u64);
memcpy(array, sample->callchain, sz);
array = (void *)array + sz;
}
if (type & PERF_SAMPLE_RAW) {
u.val32[0] = sample->raw_size;
*array = u.val64;
array = (void *)array + sizeof(u32);
memcpy(array, sample->raw_data, sample->raw_size);
array = (void *)array + sample->raw_size;
}
if (type & PERF_SAMPLE_BRANCH_STACK) {
sz = sample->branch_stack->nr * sizeof(struct branch_entry);
/* nr, hw_idx */
sz += 2 * sizeof(u64);
memcpy(array, sample->branch_stack, sz);
array = (void *)array + sz;
}
if (type & PERF_SAMPLE_REGS_USER) {
if (sample->user_regs.abi) {
*array++ = sample->user_regs.abi;
sz = hweight64(sample->user_regs.mask) * sizeof(u64);
memcpy(array, sample->user_regs.regs, sz);
array = (void *)array + sz;
} else {
*array++ = 0;
}
}
if (type & PERF_SAMPLE_STACK_USER) {
sz = sample->user_stack.size;
*array++ = sz;
if (sz) {
memcpy(array, sample->user_stack.data, sz);
array = (void *)array + sz;
*array++ = sz;
}
}
if (type & PERF_SAMPLE_WEIGHT_TYPE) {
arch_perf_synthesize_sample_weight(sample, array, type);
array++;
}
if (type & PERF_SAMPLE_DATA_SRC) {
*array = sample->data_src;
array++;
}
if (type & PERF_SAMPLE_TRANSACTION) {
*array = sample->transaction;
array++;
}
if (type & PERF_SAMPLE_REGS_INTR) {
if (sample->intr_regs.abi) {
*array++ = sample->intr_regs.abi;
sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
memcpy(array, sample->intr_regs.regs, sz);
array = (void *)array + sz;
} else {
*array++ = 0;
}
}
if (type & PERF_SAMPLE_PHYS_ADDR) {
*array = sample->phys_addr;
array++;
}
if (type & PERF_SAMPLE_CGROUP) {
*array = sample->cgroup;
array++;
}
if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
*array = sample->data_page_size;
array++;
}
if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
*array = sample->code_page_size;
array++;
}
if (type & PERF_SAMPLE_AUX) {
sz = sample->aux_sample.size;
*array++ = sz;
memcpy(array, sample->aux_sample.data, sz);
array = (void *)array + sz;
}
return 0;
}
int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample)
{
__u64 *start = array;
/*
* used for cross-endian analysis. See git commit 65014ab3
* for why this goofiness is needed.
*/
union u64_swap u;
if (type & PERF_SAMPLE_TID) {
u.val32[0] = sample->pid;
u.val32[1] = sample->tid;
*array = u.val64;
array++;
}
if (type & PERF_SAMPLE_TIME) {
*array = sample->time;
array++;
}
if (type & PERF_SAMPLE_ID) {
*array = sample->id;
array++;
}
if (type & PERF_SAMPLE_STREAM_ID) {
*array = sample->stream_id;
array++;
}
if (type & PERF_SAMPLE_CPU) {
u.val32[0] = sample->cpu;
u.val32[1] = 0;
*array = u.val64;
array++;
}
if (type & PERF_SAMPLE_IDENTIFIER) {
*array = sample->id;
array++;
}
return (void *)array - (void *)start;
}
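/*
* Emit PERF_RECORD_ID_INDEX events mapping sample ids to their evsel
* index, cpu and tid. Since the header size field is only 16 bits, the
* index is split across multiple events when there are too many ids.
*/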
int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
struct evlist *evlist, struct machine *machine, size_t from)
{
union perf_event *ev;
struct evsel *evsel;
size_t nr = 0, i = 0, sz, max_nr, n, pos;
size_t e1_sz = sizeof(struct id_index_entry);
size_t e2_sz = sizeof(struct id_index_entry_2);
size_t etot_sz = e1_sz + e2_sz;
bool e2_needed = false;
int err;
max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz;
pos = 0;
evlist__for_each_entry(evlist, evsel) {
if (pos++ < from)
continue;
nr += evsel->core.ids;
}
if (!nr)
return 0;
pr_debug2("Synthesizing id index\n");
n = nr > max_nr ? max_nr : nr;
sz = sizeof(struct perf_record_id_index) + n * etot_sz;
ev = zalloc(sz);
if (!ev)
return -ENOMEM;
sz = sizeof(struct perf_record_id_index) + n * e1_sz;
ev->id_index.header.type = PERF_RECORD_ID_INDEX;
ev->id_index.nr = n;
pos = 0;
evlist__for_each_entry(evlist, evsel) {
u32 j;
if (pos++ < from)
continue;
for (j = 0; j < evsel->core.ids; j++, i++) {
struct id_index_entry *e;
struct id_index_entry_2 *e2;
struct perf_sample_id *sid;
if (i >= n) {
ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0);
err = process(tool, ev, NULL, machine);
if (err)
goto out_err;
nr -= n;
i = 0;
e2_needed = false;
}
e = &ev->id_index.entries[i];
e->id = evsel->core.id[j];
sid = evlist__id2sid(evlist, e->id);
if (!sid) {
free(ev);
return -ENOENT;
}
e->idx = sid->idx;
e->cpu = sid->cpu.cpu;
e->tid = sid->tid;
if (sid->machine_pid)
e2_needed = true;
e2 = (void *)ev + sz;
e2[i].machine_pid = sid->machine_pid;
e2[i].vcpu = sid->vcpu.cpu;
}
}
sz = sizeof(struct perf_record_id_index) + nr * e1_sz;
ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0);
ev->id_index.nr = nr;
err = process(tool, ev, NULL, machine);
out_err:
free(ev);
return err;
}
int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
struct evlist *evlist, struct machine *machine)
{
return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0);
}
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct perf_thread_map *threads,
perf_event__handler_t process, bool needs_mmap,
bool data_mmap, unsigned int nr_threads_synthesize)
{
/*
* When perf runs in non-root PID namespace, and the namespace's proc FS
* is not mounted, nsinfo__is_in_root_namespace() returns false.
* In this case, the proc FS is coming for the parent namespace, thus
* perf tool will wrongly gather process info from its parent PID
* namespace.
*
* To avoid the confusion that the perf tool runs in a child PID
* namespace but it synthesizes thread info from its parent PID
* namespace, returns failure with warning.
*/
if (!nsinfo__is_in_root_namespace()) {
pr_err("Perf runs in non-root PID namespace but it tries to ");
pr_err("gather process info from its parent PID namespace.\n");
pr_err("Please mount the proc file system properly, e.g. ");
pr_err("add the option '--mount-proc' for unshare command.\n");
return -EPERM;
}
if (target__has_task(target))
return perf_event__synthesize_thread_map(tool, threads, process, machine,
needs_mmap, data_mmap);
else if (target__has_cpu(target))
return perf_event__synthesize_threads(tool, process, machine,
needs_mmap, data_mmap,
nr_threads_synthesize);
/* command specified */
return 0;
}
int machine__synthesize_threads(struct machine *machine, struct target *target,
struct perf_thread_map *threads, bool needs_mmap,
bool data_mmap, unsigned int nr_threads_synthesize)
{
return __machine__synthesize_threads(machine, NULL, target, threads,
perf_event__process, needs_mmap,
data_mmap, nr_threads_synthesize);
}
static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
{
struct perf_record_event_update *ev;
size += sizeof(*ev);
size = PERF_ALIGN(size, sizeof(u64));
ev = zalloc(size);
if (ev) {
ev->header.type = PERF_RECORD_EVENT_UPDATE;
ev->header.size = (u16)size;
ev->type = type;
ev->id = id;
}
return ev;
}
int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
size_t size = strlen(evsel->unit);
struct perf_record_event_update *ev;
int err;
ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
if (ev == NULL)
return -ENOMEM;
strlcpy(ev->unit, evsel->unit, size + 1);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
}
int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
struct perf_record_event_update *ev;
struct perf_record_event_update_scale *ev_data;
int err;
ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
if (ev == NULL)
return -ENOMEM;
ev->scale.scale = evsel->scale;
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
}
int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
struct perf_record_event_update *ev;
size_t len = strlen(evsel__name(evsel));
int err;
ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
if (ev == NULL)
return -ENOMEM;
strlcpy(ev->name, evsel->name, len + 1);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
}
int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus };
struct perf_record_event_update *ev;
int err;
ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64));
if (!ev)
return -ENOMEM;
syn_data.data = &ev->cpus.cpus;
ev->header.type = PERF_RECORD_EVENT_UPDATE;
ev->header.size = (u16)syn_data.size;
ev->type = PERF_EVENT_UPDATE__CPUS;
ev->id = evsel->core.id[0];
cpu_map_data__synthesize(&syn_data);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
}
int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
perf_event__handler_t process)
{
struct evsel *evsel;
int err = 0;
evlist__for_each_entry(evlist, evsel) {
err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
evsel->core.id, process);
if (err) {
pr_debug("failed to create perf header attribute\n");
return err;
}
}
return err;
}
static bool has_unit(struct evsel *evsel)
{
return evsel->unit && *evsel->unit;
}
static bool has_scale(struct evsel *evsel)
{
return evsel->scale != 1;
}
int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
perf_event__handler_t process, bool is_pipe)
{
struct evsel *evsel;
int err;
/*
	 * Synthesize other event details not carried within the
	 * attr event - unit, scale, name
*/
evlist__for_each_entry(evsel_list, evsel) {
if (!evsel->supported)
continue;
/*
* Synthesize unit and scale only if it's defined.
*/
if (has_unit(evsel)) {
err = perf_event__synthesize_event_update_unit(tool, evsel, process);
if (err < 0) {
pr_err("Couldn't synthesize evsel unit.\n");
return err;
}
}
if (has_scale(evsel)) {
err = perf_event__synthesize_event_update_scale(tool, evsel, process);
if (err < 0) {
pr_err("Couldn't synthesize evsel evsel.\n");
return err;
}
}
if (evsel->core.own_cpus) {
err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
if (err < 0) {
pr_err("Couldn't synthesize evsel cpus.\n");
return err;
}
}
/*
* Name is needed only for pipe output,
* perf.data carries event names.
*/
if (is_pipe) {
err = perf_event__synthesize_event_update_name(tool, evsel, process);
if (err < 0) {
pr_err("Couldn't synthesize evsel name.\n");
return err;
}
}
}
return 0;
}
int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
u32 ids, u64 *id, perf_event__handler_t process)
{
union perf_event *ev;
size_t size;
int err;
size = sizeof(struct perf_event_attr);
size = PERF_ALIGN(size, sizeof(u64));
size += sizeof(struct perf_event_header);
size += ids * sizeof(u64);
ev = zalloc(size);
if (ev == NULL)
return -ENOMEM;
ev->attr.attr = *attr;
memcpy(perf_record_header_attr_id(ev), id, ids * sizeof(u64));
ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
ev->attr.header.size = (u16)size;
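	/* header.size is a u16: if the assignment truncated the size, the event cannot be emitted. */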
if (ev->attr.header.size == size)
err = process(tool, ev, NULL, NULL);
else
err = -E2BIG;
free(ev);
return err;
}
#ifdef HAVE_LIBTRACEEVENT
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
perf_event__handler_t process)
{
union perf_event ev;
struct tracing_data *tdata;
ssize_t size = 0, aligned_size = 0, padding;
struct feat_fd ff;
/*
* We are going to store the size of the data followed
	 * by the data contents. Since the fd is a pipe,
* we cannot seek back to store the size of the data once
* we know it. Instead we:
*
* - write the tracing data to the temp file
* - get/write the data size to pipe
* - write the tracing data from the temp file
* to the pipe
*/
tdata = tracing_data_get(&evlist->core.entries, fd, true);
if (!tdata)
return -1;
memset(&ev, 0, sizeof(ev));
ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
size = tdata->size;
aligned_size = PERF_ALIGN(size, sizeof(u64));
padding = aligned_size - size;
ev.tracing_data.header.size = sizeof(ev.tracing_data);
ev.tracing_data.size = aligned_size;
process(tool, &ev, NULL, NULL);
/*
* The put function will copy all the tracing data
* stored in temp file to the pipe.
*/
tracing_data_put(tdata);
ff = (struct feat_fd){ .fd = fd };
if (write_padded(&ff, NULL, 0, padding))
return -1;
return aligned_size;
}
#endif
int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
perf_event__handler_t process, struct machine *machine)
{
union perf_event ev;
size_t len;
if (!pos->hit)
return 0;
memset(&ev, 0, sizeof(ev));
len = pos->long_name_len + 1;
len = PERF_ALIGN(len, NAME_ALIGN);
ev.build_id.size = min(pos->bid.size, sizeof(pos->bid.data));
memcpy(&ev.build_id.build_id, pos->bid.data, ev.build_id.size);
ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE;
ev.build_id.pid = machine->pid;
ev.build_id.header.size = sizeof(ev.build_id) + len;
memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
return process(tool, &ev, NULL, machine);
}
int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
struct evlist *evlist, perf_event__handler_t process, bool attrs)
{
int err;
if (attrs) {
err = perf_event__synthesize_attrs(tool, evlist, process);
if (err < 0) {
pr_err("Couldn't synthesize attrs.\n");
return err;
}
}
	err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
	if (err < 0) {
		pr_err("Couldn't synthesize extra attrs.\n");
		return err;
	}
err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
if (err < 0) {
pr_err("Couldn't synthesize thread map.\n");
return err;
}
err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
if (err < 0) {
pr_err("Couldn't synthesize thread map.\n");
return err;
}
err = perf_event__synthesize_stat_config(tool, config, process, NULL);
if (err < 0) {
pr_err("Couldn't synthesize config.\n");
return err;
}
return 0;
}
extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
struct evlist *evlist, perf_event__handler_t process)
{
struct perf_header *header = &session->header;
struct perf_record_header_feature *fe;
struct feat_fd ff;
size_t sz, sz_hdr;
int feat, ret;
sz_hdr = sizeof(fe->header);
sz = sizeof(union perf_event);
/* get a nice alignment */
sz = PERF_ALIGN(sz, page_size);
memset(&ff, 0, sizeof(ff));
ff.buf = malloc(sz);
if (!ff.buf)
return -ENOMEM;
ff.size = sz - sz_hdr;
ff.ph = &session->header;
for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
if (!feat_ops[feat].synthesize) {
pr_debug("No record header feature for header :%d\n", feat);
continue;
}
ff.offset = sizeof(*fe);
ret = feat_ops[feat].write(&ff, evlist);
if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
pr_debug("Error writing feature\n");
continue;
}
/* ff.buf may have changed due to realloc in do_write() */
fe = ff.buf;
memset(fe, 0, sizeof(*fe));
fe->feat_id = feat;
fe->header.type = PERF_RECORD_HEADER_FEATURE;
fe->header.size = ff.offset;
ret = process(tool, ff.buf, NULL, NULL);
if (ret) {
free(ff.buf);
return ret;
}
}
/* Send HEADER_LAST_FEATURE mark. */
fe = ff.buf;
fe->feat_id = HEADER_LAST_FEATURE;
fe->header.type = PERF_RECORD_HEADER_FEATURE;
fe->header.size = sizeof(*fe);
ret = process(tool, ff.buf, NULL, NULL);
free(ff.buf);
return ret;
}
int perf_event__synthesize_for_pipe(struct perf_tool *tool,
struct perf_session *session,
struct perf_data *data,
perf_event__handler_t process)
{
int err;
int ret = 0;
struct evlist *evlist = session->evlist;
/*
* We need to synthesize events first, because some
	 * features work on top of them (on report side).
*/
err = perf_event__synthesize_attrs(tool, evlist, process);
if (err < 0) {
pr_err("Couldn't synthesize attrs.\n");
return err;
}
ret += err;
err = perf_event__synthesize_features(tool, session, evlist, process);
if (err < 0) {
pr_err("Couldn't synthesize features.\n");
return err;
}
ret += err;
#ifdef HAVE_LIBTRACEEVENT
if (have_tracepoints(&evlist->core.entries)) {
int fd = perf_data__fd(data);
/*
* FIXME err <= 0 here actually means that
		 * there were no tracepoints, so it's not really
		 * an error, just that we don't need to
		 * synthesize anything.  We should report this
		 * more properly and also propagate errors that
		 * currently call die().
*/
err = perf_event__synthesize_tracing_data(tool, fd, evlist,
process);
if (err <= 0) {
pr_err("Couldn't record tracing data.\n");
return err;
}
ret += err;
}
#else
(void)data;
#endif
return ret;
}
int parse_synth_opt(char *synth)
{
char *p, *q;
int ret = 0;
if (synth == NULL)
return -1;
for (q = synth; (p = strsep(&q, ",")); p = q) {
if (!strcasecmp(p, "no") || !strcasecmp(p, "none"))
return 0;
if (!strcasecmp(p, "all"))
return PERF_SYNTH_ALL;
if (!strcasecmp(p, "task"))
ret |= PERF_SYNTH_TASK;
else if (!strcasecmp(p, "mmap"))
ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP;
else if (!strcasecmp(p, "cgroup"))
ret |= PERF_SYNTH_CGROUP;
else
return -1;
}
return ret;
}
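/*
 * Editor's illustration (hypothetical helper, not called anywhere in perf):
 * sketches how a --synth option string maps to a PERF_SYNTH_* bit mask.
 */
static __attribute__((unused)) int parse_synth_opt_example(void)
{
	char buf[] = "task,mmap";	/* strsep() inside parse_synth_opt() modifies its argument */
	int mask = parse_synth_opt(buf);

	/*
	 * mask is now PERF_SYNTH_TASK | PERF_SYNTH_MMAP; "no"/"none" yield 0,
	 * "all" yields PERF_SYNTH_ALL and an unrecognized token yields -1.
	 */
	return mask;
}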
| linux-master | tools/perf/util/synthetic-events.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* Generic non-thread safe hash map implementation.
*
* Copyright (c) 2019 Facebook
*/
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <linux/err.h>
#include "hashmap.h"
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
/* prevent accidental re-addition of reallocarray() */
#pragma GCC poison reallocarray
/* start with 4 buckets */
#define HASHMAP_MIN_CAP_BITS 2
static void hashmap_add_entry(struct hashmap_entry **pprev,
struct hashmap_entry *entry)
{
entry->next = *pprev;
*pprev = entry;
}
static void hashmap_del_entry(struct hashmap_entry **pprev,
struct hashmap_entry *entry)
{
*pprev = entry->next;
entry->next = NULL;
}
void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
hashmap_equal_fn equal_fn, void *ctx)
{
map->hash_fn = hash_fn;
map->equal_fn = equal_fn;
map->ctx = ctx;
map->buckets = NULL;
map->cap = 0;
map->cap_bits = 0;
map->sz = 0;
}
struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
hashmap_equal_fn equal_fn,
void *ctx)
{
struct hashmap *map = malloc(sizeof(struct hashmap));
if (!map)
return ERR_PTR(-ENOMEM);
hashmap__init(map, hash_fn, equal_fn, ctx);
return map;
}
void hashmap__clear(struct hashmap *map)
{
struct hashmap_entry *cur, *tmp;
size_t bkt;
hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
free(cur);
}
free(map->buckets);
map->buckets = NULL;
map->cap = map->cap_bits = map->sz = 0;
}
void hashmap__free(struct hashmap *map)
{
if (IS_ERR_OR_NULL(map))
return;
hashmap__clear(map);
free(map);
}
size_t hashmap__size(const struct hashmap *map)
{
return map->sz;
}
size_t hashmap__capacity(const struct hashmap *map)
{
return map->cap;
}
static bool hashmap_needs_to_grow(struct hashmap *map)
{
/* grow if empty or more than 75% filled */
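	/* e.g. with cap == 4 the table doubles on the 4th insertion: (3 + 1) * 4 / 3 == 5 > 4 */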
return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
}
static int hashmap_grow(struct hashmap *map)
{
struct hashmap_entry **new_buckets;
struct hashmap_entry *cur, *tmp;
size_t new_cap_bits, new_cap;
size_t h, bkt;
new_cap_bits = map->cap_bits + 1;
if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
new_cap_bits = HASHMAP_MIN_CAP_BITS;
new_cap = 1UL << new_cap_bits;
new_buckets = calloc(new_cap, sizeof(new_buckets[0]));
if (!new_buckets)
return -ENOMEM;
hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
hashmap_add_entry(&new_buckets[h], cur);
}
map->cap = new_cap;
map->cap_bits = new_cap_bits;
free(map->buckets);
map->buckets = new_buckets;
return 0;
}
static bool hashmap_find_entry(const struct hashmap *map,
const long key, size_t hash,
struct hashmap_entry ***pprev,
struct hashmap_entry **entry)
{
struct hashmap_entry *cur, **prev_ptr;
if (!map->buckets)
return false;
for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
cur;
prev_ptr = &cur->next, cur = cur->next) {
if (map->equal_fn(cur->key, key, map->ctx)) {
if (pprev)
*pprev = prev_ptr;
*entry = cur;
return true;
}
}
return false;
}
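/*
 * Insertion strategies, as implemented below: HASHMAP_ADD fails with -EEXIST
 * if the key already exists, HASHMAP_SET adds or overwrites, HASHMAP_UPDATE
 * only overwrites an existing entry (-ENOENT otherwise) and HASHMAP_APPEND
 * always adds a new entry, even for a duplicate key.
 */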
int hashmap_insert(struct hashmap *map, long key, long value,
enum hashmap_insert_strategy strategy,
long *old_key, long *old_value)
{
struct hashmap_entry *entry;
size_t h;
int err;
if (old_key)
*old_key = 0;
if (old_value)
*old_value = 0;
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
if (strategy != HASHMAP_APPEND &&
hashmap_find_entry(map, key, h, NULL, &entry)) {
if (old_key)
*old_key = entry->key;
if (old_value)
*old_value = entry->value;
if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) {
entry->key = key;
entry->value = value;
return 0;
} else if (strategy == HASHMAP_ADD) {
return -EEXIST;
}
}
if (strategy == HASHMAP_UPDATE)
return -ENOENT;
if (hashmap_needs_to_grow(map)) {
err = hashmap_grow(map);
if (err)
return err;
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
}
entry = malloc(sizeof(struct hashmap_entry));
if (!entry)
return -ENOMEM;
entry->key = key;
entry->value = value;
hashmap_add_entry(&map->buckets[h], entry);
map->sz++;
return 0;
}
bool hashmap_find(const struct hashmap *map, long key, long *value)
{
struct hashmap_entry *entry;
size_t h;
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
if (!hashmap_find_entry(map, key, h, NULL, &entry))
return false;
if (value)
*value = entry->value;
return true;
}
bool hashmap_delete(struct hashmap *map, long key,
long *old_key, long *old_value)
{
struct hashmap_entry **pprev, *entry;
size_t h;
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
if (!hashmap_find_entry(map, key, h, &pprev, &entry))
return false;
if (old_key)
*old_key = entry->key;
if (old_value)
*old_value = entry->value;
hashmap_del_entry(pprev, entry);
free(entry);
map->sz--;
return true;
}
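/*
 * Editor's usage sketch (hypothetical, not part of the original file): an
 * unused helper exercising the long-keyed API defined above with an
 * identity hash over integer keys.
 */
static size_t example_hash_fn(long key, void *ctx __attribute__((unused)))
{
	return (size_t)key;
}

static bool example_equal_fn(long key1, long key2, void *ctx __attribute__((unused)))
{
	return key1 == key2;
}

static __attribute__((unused)) int hashmap_usage_example(void)
{
	struct hashmap *map = hashmap__new(example_hash_fn, example_equal_fn, NULL);
	long val = 0;
	int err;

	if (IS_ERR(map))
		return PTR_ERR(map);

	/* HASHMAP_ADD refuses duplicate keys with -EEXIST. */
	err = hashmap_insert(map, 42, 100, HASHMAP_ADD, NULL, NULL);
	if (!err && (!hashmap_find(map, 42, &val) || val != 100))
		err = -EINVAL;	/* lookup should return the stored value */

	hashmap_delete(map, 42, NULL, NULL);	/* drops the entry if present */
	hashmap__free(map);
	return err;
}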
| linux-master | tools/perf/util/hashmap.c |