python_code (string, 0-1.8M chars) | repo_name (string, 7 classes) | file_path (string, 5-99 chars)
---|---|---|
// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include "rwsem.h"
int init_rwsem(struct rw_semaphore *sem)
{
return pthread_rwlock_init(&sem->lock, NULL);
}
int exit_rwsem(struct rw_semaphore *sem)
{
return pthread_rwlock_destroy(&sem->lock);
}
int down_read(struct rw_semaphore *sem)
{
return perf_singlethreaded ? 0 : pthread_rwlock_rdlock(&sem->lock);
}
int up_read(struct rw_semaphore *sem)
{
return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
}
int down_write(struct rw_semaphore *sem)
{
return perf_singlethreaded ? 0 : pthread_rwlock_wrlock(&sem->lock);
}
int up_write(struct rw_semaphore *sem)
{
return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
}
| linux-master | tools/perf/util/rwsem.c |
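The rwsem.c wrappers above map the kernel-style rw_semaphore API onto pthread rwlocks, and the lock/unlock paths become no-ops when perf runs single-threaded. A minimal usage sketch under those assumptions follows; the example_* names are hypothetical and not part of the file:

/* Illustrative only: guard a shared value with the wrappers above. */
static struct rw_semaphore example_lock;
static int example_value;

static void example_setup(void)
{
	init_rwsem(&example_lock);	/* wraps pthread_rwlock_init() */
}

static int example_read(void)
{
	int v;

	down_read(&example_lock);	/* shared lock; no-op when perf_singlethreaded */
	v = example_value;
	up_read(&example_lock);
	return v;
}

static void example_write(int v)
{
	down_write(&example_lock);	/* exclusive lock */
	example_value = v;
	up_write(&example_lock);
}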
// SPDX-License-Identifier: GPL-2.0
#include "comm.h"
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/refcount.h>
#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include "rwsem.h"
struct comm_str {
char *str;
struct rb_node rb_node;
refcount_t refcnt;
};
/* Should perhaps be moved to struct machine */
static struct rb_root comm_str_root;
static struct rw_semaphore comm_str_lock = {.lock = PTHREAD_RWLOCK_INITIALIZER,};
static struct comm_str *comm_str__get(struct comm_str *cs)
{
if (cs && refcount_inc_not_zero(&cs->refcnt))
return cs;
return NULL;
}
static void comm_str__put(struct comm_str *cs)
{
if (cs && refcount_dec_and_test(&cs->refcnt)) {
down_write(&comm_str_lock);
rb_erase(&cs->rb_node, &comm_str_root);
up_write(&comm_str_lock);
zfree(&cs->str);
free(cs);
}
}
static struct comm_str *comm_str__alloc(const char *str)
{
struct comm_str *cs;
cs = zalloc(sizeof(*cs));
if (!cs)
return NULL;
cs->str = strdup(str);
if (!cs->str) {
free(cs);
return NULL;
}
refcount_set(&cs->refcnt, 1);
return cs;
}
static
struct comm_str *__comm_str__findnew(const char *str, struct rb_root *root)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct comm_str *iter, *new;
int cmp;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct comm_str, rb_node);
/*
* If we race with comm_str__put(), iter->refcnt is 0
* and it will be removed within the comm_str__put() call
* shortly, so ignore it in this search.
*/
cmp = strcmp(str, iter->str);
if (!cmp && comm_str__get(iter))
return iter;
if (cmp < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
new = comm_str__alloc(str);
if (!new)
return NULL;
rb_link_node(&new->rb_node, parent, p);
rb_insert_color(&new->rb_node, root);
return new;
}
static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root)
{
struct comm_str *cs;
down_write(&comm_str_lock);
cs = __comm_str__findnew(str, root);
up_write(&comm_str_lock);
return cs;
}
struct comm *comm__new(const char *str, u64 timestamp, bool exec)
{
struct comm *comm = zalloc(sizeof(*comm));
if (!comm)
return NULL;
comm->start = timestamp;
comm->exec = exec;
comm->comm_str = comm_str__findnew(str, &comm_str_root);
if (!comm->comm_str) {
free(comm);
return NULL;
}
return comm;
}
int comm__override(struct comm *comm, const char *str, u64 timestamp, bool exec)
{
struct comm_str *new, *old = comm->comm_str;
new = comm_str__findnew(str, &comm_str_root);
if (!new)
return -ENOMEM;
comm_str__put(old);
comm->comm_str = new;
comm->start = timestamp;
if (exec)
comm->exec = true;
return 0;
}
void comm__free(struct comm *comm)
{
comm_str__put(comm->comm_str);
free(comm);
}
const char *comm__str(const struct comm *comm)
{
return comm->comm_str->str;
}
| linux-master | tools/perf/util/comm.c |
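comm.c interns command-name strings in a global, refcounted rbtree, so threads sharing the same comm point at a single comm_str. A minimal lifecycle sketch, using only the functions shown above (the timestamps are made-up values):

/* Illustrative only: exercise the comm API defined above. */
static void comm_example(void)
{
	struct comm *comm = comm__new("bash", /*timestamp=*/1000, /*exec=*/false);

	if (comm == NULL)
		return;
	printf("comm: %s\n", comm__str(comm));	/* prints "bash" */
	/* An exec at a later timestamp swaps the interned string in place. */
	if (comm__override(comm, "perf", /*timestamp=*/2000, /*exec=*/true) == 0)
		printf("comm now: %s\n", comm__str(comm));	/* prints "perf" */
	comm__free(comm);	/* drops the comm_str refcount */
}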
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "metricgroup.h"
#include "cpumap.h"
#include "cputopo.h"
#include "debug.h"
#include "evlist.h"
#include "expr.h"
#include <util/expr-bison.h>
#include <util/expr-flex.h>
#include "util/hashmap.h"
#include "util/header.h"
#include "util/pmu.h"
#include "smt.h"
#include "tsc.h"
#include <api/fs/fs.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <ctype.h>
#include <math.h>
#include "pmu.h"
#ifdef PARSER_DEBUG
extern int expr_debug;
#endif
struct expr_id_data {
union {
struct {
double val;
int source_count;
} val;
struct {
double val;
const char *metric_name;
const char *metric_expr;
} ref;
};
enum {
/* Holding a double value. */
EXPR_ID_DATA__VALUE,
/* Reference to another metric. */
EXPR_ID_DATA__REF,
/* A reference but the value has been computed. */
EXPR_ID_DATA__REF_VALUE,
} kind;
};
static size_t key_hash(long key, void *ctx __maybe_unused)
{
const char *str = (const char *)key;
size_t hash = 0;
while (*str != '\0') {
hash *= 31;
hash += *str;
str++;
}
return hash;
}
static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
{
return !strcmp((const char *)key1, (const char *)key2);
}
struct hashmap *ids__new(void)
{
struct hashmap *hash;
hash = hashmap__new(key_hash, key_equal, NULL);
if (IS_ERR(hash))
return NULL;
return hash;
}
void ids__free(struct hashmap *ids)
{
struct hashmap_entry *cur;
size_t bkt;
if (ids == NULL)
return;
hashmap__for_each_entry(ids, cur, bkt) {
zfree(&cur->pkey);
zfree(&cur->pvalue);
}
hashmap__free(ids);
}
int ids__insert(struct hashmap *ids, const char *id)
{
struct expr_id_data *data_ptr = NULL, *old_data = NULL;
char *old_key = NULL;
int ret;
ret = hashmap__set(ids, id, data_ptr, &old_key, &old_data);
if (ret)
free(data_ptr);
free(old_key);
free(old_data);
return ret;
}
struct hashmap *ids__union(struct hashmap *ids1, struct hashmap *ids2)
{
size_t bkt;
struct hashmap_entry *cur;
int ret;
struct expr_id_data *old_data = NULL;
char *old_key = NULL;
if (!ids1)
return ids2;
if (!ids2)
return ids1;
if (hashmap__size(ids1) < hashmap__size(ids2)) {
struct hashmap *tmp = ids1;
ids1 = ids2;
ids2 = tmp;
}
hashmap__for_each_entry(ids2, cur, bkt) {
ret = hashmap__set(ids1, cur->key, cur->value, &old_key, &old_data);
free(old_key);
free(old_data);
if (ret) {
hashmap__free(ids1);
hashmap__free(ids2);
return NULL;
}
}
hashmap__free(ids2);
return ids1;
}
/* Caller must make sure id is allocated */
int expr__add_id(struct expr_parse_ctx *ctx, const char *id)
{
return ids__insert(ctx->ids, id);
}
/* Caller must make sure id is allocated */
int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val)
{
return expr__add_id_val_source_count(ctx, id, val, /*source_count=*/1);
}
/* Caller must make sure id is allocated */
int expr__add_id_val_source_count(struct expr_parse_ctx *ctx, const char *id,
double val, int source_count)
{
struct expr_id_data *data_ptr = NULL, *old_data = NULL;
char *old_key = NULL;
int ret;
data_ptr = malloc(sizeof(*data_ptr));
if (!data_ptr)
return -ENOMEM;
data_ptr->val.val = val;
data_ptr->val.source_count = source_count;
data_ptr->kind = EXPR_ID_DATA__VALUE;
ret = hashmap__set(ctx->ids, id, data_ptr, &old_key, &old_data);
if (ret)
free(data_ptr);
free(old_key);
free(old_data);
return ret;
}
int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref)
{
struct expr_id_data *data_ptr = NULL, *old_data = NULL;
char *old_key = NULL;
char *name;
int ret;
data_ptr = zalloc(sizeof(*data_ptr));
if (!data_ptr)
return -ENOMEM;
name = strdup(ref->metric_name);
if (!name) {
free(data_ptr);
return -ENOMEM;
}
/*
* Intentionally passing just const char pointers,
* originally from 'struct pmu_event' object.
* We don't need to change them, so there's no
* need to create our own copy.
*/
data_ptr->ref.metric_name = ref->metric_name;
data_ptr->ref.metric_expr = ref->metric_expr;
data_ptr->kind = EXPR_ID_DATA__REF;
ret = hashmap__set(ctx->ids, name, data_ptr, &old_key, &old_data);
if (ret)
free(data_ptr);
pr_debug2("adding ref metric %s: %s\n",
ref->metric_name, ref->metric_expr);
free(old_key);
free(old_data);
return ret;
}
int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
struct expr_id_data **data)
{
return hashmap__find(ctx->ids, id, data) ? 0 : -1;
}
bool expr__subset_of_ids(struct expr_parse_ctx *haystack,
struct expr_parse_ctx *needles)
{
struct hashmap_entry *cur;
size_t bkt;
struct expr_id_data *data;
hashmap__for_each_entry(needles->ids, cur, bkt) {
if (expr__get_id(haystack, cur->pkey, &data))
return false;
}
return true;
}
int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
struct expr_id_data **datap)
{
struct expr_id_data *data;
if (expr__get_id(ctx, id, datap) || !*datap) {
pr_debug("%s not found\n", id);
return -1;
}
data = *datap;
switch (data->kind) {
case EXPR_ID_DATA__VALUE:
pr_debug2("lookup(%s): val %f\n", id, data->val.val);
break;
case EXPR_ID_DATA__REF:
pr_debug2("lookup(%s): ref metric name %s\n", id,
data->ref.metric_name);
pr_debug("processing metric: %s ENTRY\n", id);
data->kind = EXPR_ID_DATA__REF_VALUE;
if (expr__parse(&data->ref.val, ctx, data->ref.metric_expr)) {
pr_debug("%s failed to count\n", id);
return -1;
}
pr_debug("processing metric: %s EXIT: %f\n", id, data->ref.val);
break;
case EXPR_ID_DATA__REF_VALUE:
pr_debug2("lookup(%s): ref val %f metric name %s\n", id,
data->ref.val, data->ref.metric_name);
break;
default:
assert(0); /* Unreachable. */
}
return 0;
}
void expr__del_id(struct expr_parse_ctx *ctx, const char *id)
{
struct expr_id_data *old_val = NULL;
char *old_key = NULL;
hashmap__delete(ctx->ids, id, &old_key, &old_val);
free(old_key);
free(old_val);
}
struct expr_parse_ctx *expr__ctx_new(void)
{
struct expr_parse_ctx *ctx;
ctx = malloc(sizeof(struct expr_parse_ctx));
if (!ctx)
return NULL;
ctx->ids = hashmap__new(key_hash, key_equal, NULL);
if (IS_ERR(ctx->ids)) {
free(ctx);
return NULL;
}
ctx->sctx.user_requested_cpu_list = NULL;
ctx->sctx.runtime = 0;
ctx->sctx.system_wide = false;
return ctx;
}
void expr__ctx_clear(struct expr_parse_ctx *ctx)
{
struct hashmap_entry *cur;
size_t bkt;
hashmap__for_each_entry(ctx->ids, cur, bkt) {
zfree(&cur->pkey);
zfree(&cur->pvalue);
}
hashmap__clear(ctx->ids);
}
void expr__ctx_free(struct expr_parse_ctx *ctx)
{
struct hashmap_entry *cur;
size_t bkt;
if (!ctx)
return;
zfree(&ctx->sctx.user_requested_cpu_list);
hashmap__for_each_entry(ctx->ids, cur, bkt) {
zfree(&cur->pkey);
zfree(&cur->pvalue);
}
hashmap__free(ctx->ids);
free(ctx);
}
static int
__expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
bool compute_ids)
{
YY_BUFFER_STATE buffer;
void *scanner;
int ret;
pr_debug2("parsing metric: %s\n", expr);
ret = expr_lex_init_extra(&ctx->sctx, &scanner);
if (ret)
return ret;
buffer = expr__scan_string(expr, scanner);
#ifdef PARSER_DEBUG
expr_debug = 1;
expr_set_debug(1, scanner);
#endif
ret = expr_parse(val, ctx, compute_ids, scanner);
expr__flush_buffer(buffer, scanner);
expr__delete_buffer(buffer, scanner);
expr_lex_destroy(scanner);
return ret;
}
int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
const char *expr)
{
return __expr__parse(final_val, ctx, expr, /*compute_ids=*/false) ? -1 : 0;
}
int expr__find_ids(const char *expr, const char *one,
struct expr_parse_ctx *ctx)
{
int ret = __expr__parse(NULL, ctx, expr, /*compute_ids=*/true);
if (one)
expr__del_id(ctx, one);
return ret;
}
double expr_id_data__value(const struct expr_id_data *data)
{
if (data->kind == EXPR_ID_DATA__VALUE)
return data->val.val;
assert(data->kind == EXPR_ID_DATA__REF_VALUE);
return data->ref.val;
}
double expr_id_data__source_count(const struct expr_id_data *data)
{
assert(data->kind == EXPR_ID_DATA__VALUE);
return data->val.source_count;
}
#if !defined(__i386__) && !defined(__x86_64__)
double arch_get_tsc_freq(void)
{
return 0.0;
}
#endif
static double has_pmem(void)
{
static bool has_pmem, cached;
const char *sysfs = sysfs__mountpoint();
char path[PATH_MAX];
if (!cached) {
snprintf(path, sizeof(path), "%s/firmware/acpi/tables/NFIT", sysfs);
has_pmem = access(path, F_OK) == 0;
cached = true;
}
return has_pmem ? 1.0 : 0.0;
}
double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx)
{
const struct cpu_topology *topology;
double result = NAN;
if (!strcmp("#num_cpus", literal)) {
result = cpu__max_present_cpu().cpu;
goto out;
}
if (!strcmp("#num_cpus_online", literal)) {
struct perf_cpu_map *online = cpu_map__online();
if (online)
result = perf_cpu_map__nr(online);
goto out;
}
if (!strcasecmp("#system_tsc_freq", literal)) {
result = arch_get_tsc_freq();
goto out;
}
/*
* Assume that topology strings are consistent, such as CPUs "0-1"
* wouldn't be listed as "0,1", and so after deduplication the number of
* these strings gives an indication of the number of packages, dies,
* etc.
*/
if (!strcasecmp("#smt_on", literal)) {
result = smt_on() ? 1.0 : 0.0;
goto out;
}
if (!strcmp("#core_wide", literal)) {
result = core_wide(ctx->system_wide, ctx->user_requested_cpu_list)
? 1.0 : 0.0;
goto out;
}
if (!strcmp("#num_packages", literal)) {
topology = online_topology();
result = topology->package_cpus_lists;
goto out;
}
if (!strcmp("#num_dies", literal)) {
topology = online_topology();
result = topology->die_cpus_lists;
goto out;
}
if (!strcmp("#num_cores", literal)) {
topology = online_topology();
result = topology->core_cpus_lists;
goto out;
}
if (!strcmp("#slots", literal)) {
result = perf_pmu__cpu_slots_per_cycle();
goto out;
}
if (!strcmp("#has_pmem", literal)) {
result = has_pmem();
goto out;
}
pr_err("Unrecognized literal '%s'", literal);
out:
pr_debug2("literal: %s = %f\n", literal, result);
return result;
}
/* Does the event 'id' parse? Determine via ctx->ids if possible. */
double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const char *id)
{
struct evlist *tmp;
double ret;
if (hashmap__find(ctx->ids, id, /*value=*/NULL))
return 1.0;
if (!compute_ids)
return 0.0;
tmp = evlist__new();
if (!tmp)
return NAN;
ret = parse_event(tmp, id) ? 0 : 1;
evlist__delete(tmp);
return ret;
}
double expr__strcmp_cpuid_str(const struct expr_parse_ctx *ctx __maybe_unused,
bool compute_ids __maybe_unused, const char *test_id)
{
double ret;
struct perf_pmu *pmu = pmu__find_core_pmu();
char *cpuid = perf_pmu__getcpuid(pmu);
if (!cpuid)
return NAN;
ret = !strcmp_cpuid_str(test_id, cpuid);
free(cpuid);
return ret;
}
| linux-master | tools/perf/util/expr.c |
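expr.c maps identifiers to values (or to referenced metrics) in a parse context and evaluates metric expressions against it. A minimal sketch of evaluating one expression, assuming only the API shown above; expr__ctx_free() releases the id strings, which is why the keys are strdup()'d:

/* Illustrative only: evaluate "insns / cycles" with fixed event values. */
static int expr_example(void)
{
	struct expr_parse_ctx *ctx = expr__ctx_new();
	double ipc = 0.0;

	if (ctx == NULL)
		return -ENOMEM;
	if (expr__add_id_val(ctx, strdup("insns"), 300.0) ||
	    expr__add_id_val(ctx, strdup("cycles"), 200.0) ||
	    expr__parse(&ipc, ctx, "insns / cycles")) {
		expr__ctx_free(ctx);
		return -1;
	}
	pr_debug("ipc = %f\n", ipc);	/* 1.5 */
	expr__ctx_free(ctx);
	return 0;
}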
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*
* Parts came from builtin-{top,stat,record}.c, see those files for further
* copyright notes.
*/
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "record.h"
#include "debug.h"
#include "units.h"
#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
#include "util/pmu.h"
#include "util/sample.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>
#include "parse-events.h"
#include <subcmd/parse-options.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/timerfd.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>
#include <internal/xyarray.h>
#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
perf_evlist__init(&evlist->core);
perf_evlist__set_maps(&evlist->core, cpus, threads);
evlist->workload.pid = -1;
evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
evlist->ctl_fd.fd = -1;
evlist->ctl_fd.ack = -1;
evlist->ctl_fd.pos = -1;
}
struct evlist *evlist__new(void)
{
struct evlist *evlist = zalloc(sizeof(*evlist));
if (evlist != NULL)
evlist__init(evlist, NULL, NULL);
return evlist;
}
struct evlist *evlist__new_default(void)
{
struct evlist *evlist = evlist__new();
bool can_profile_kernel;
int err;
if (!evlist)
return NULL;
can_profile_kernel = perf_event_paranoid_check(1);
err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
if (err) {
evlist__delete(evlist);
evlist = NULL;
}
return evlist;
}
struct evlist *evlist__new_dummy(void)
{
struct evlist *evlist = evlist__new();
if (evlist && evlist__add_dummy(evlist)) {
evlist__delete(evlist);
evlist = NULL;
}
return evlist;
}
/**
* evlist__set_id_pos - set the positions of event ids.
* @evlist: selected event list
*
* Events with compatible sample types all have the same id_pos
* and is_pos. For convenience, put a copy on evlist.
*/
void evlist__set_id_pos(struct evlist *evlist)
{
struct evsel *first = evlist__first(evlist);
evlist->id_pos = first->id_pos;
evlist->is_pos = first->is_pos;
}
static void evlist__update_id_pos(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel)
evsel__calc_id_pos(evsel);
evlist__set_id_pos(evlist);
}
static void evlist__purge(struct evlist *evlist)
{
struct evsel *pos, *n;
evlist__for_each_entry_safe(evlist, n, pos) {
list_del_init(&pos->core.node);
pos->evlist = NULL;
evsel__delete(pos);
}
evlist->core.nr_entries = 0;
}
void evlist__exit(struct evlist *evlist)
{
event_enable_timer__exit(&evlist->eet);
zfree(&evlist->mmap);
zfree(&evlist->overwrite_mmap);
perf_evlist__exit(&evlist->core);
}
void evlist__delete(struct evlist *evlist)
{
if (evlist == NULL)
return;
evlist__free_stats(evlist);
evlist__munmap(evlist);
evlist__close(evlist);
evlist__purge(evlist);
evlist__exit(evlist);
free(evlist);
}
void evlist__add(struct evlist *evlist, struct evsel *entry)
{
perf_evlist__add(&evlist->core, &entry->core);
entry->evlist = evlist;
entry->tracking = !entry->core.idx;
if (evlist->core.nr_entries == 1)
evlist__set_id_pos(evlist);
}
void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
evsel->evlist = NULL;
perf_evlist__remove(&evlist->core, &evsel->core);
}
void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
{
while (!list_empty(list)) {
struct evsel *evsel, *temp, *leader = NULL;
__evlist__for_each_entry_safe(list, temp, evsel) {
list_del_init(&evsel->core.node);
evlist__add(evlist, evsel);
leader = evsel;
break;
}
__evlist__for_each_entry_safe(list, temp, evsel) {
if (evsel__has_leader(evsel, leader)) {
list_del_init(&evsel->core.node);
evlist__add(evlist, evsel);
}
}
}
}
int __evlist__set_tracepoints_handlers(struct evlist *evlist,
const struct evsel_str_handler *assocs, size_t nr_assocs)
{
size_t i;
int err;
for (i = 0; i < nr_assocs; i++) {
// Adding a handler for an event not in this evlist, just ignore it.
struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);
if (evsel == NULL)
continue;
err = -EEXIST;
if (evsel->handler != NULL)
goto out;
evsel->handler = assocs[i].handler;
}
err = 0;
out:
return err;
}
static void evlist__set_leader(struct evlist *evlist)
{
perf_evlist__set_leader(&evlist->core);
}
static struct evsel *evlist__dummy_event(struct evlist *evlist)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
};
return evsel__new_idx(&attr, evlist->core.nr_entries);
}
int evlist__add_dummy(struct evlist *evlist)
{
struct evsel *evsel = evlist__dummy_event(evlist);
if (evsel == NULL)
return -ENOMEM;
evlist__add(evlist, evsel);
return 0;
}
struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
{
struct evsel *evsel = evlist__dummy_event(evlist);
if (!evsel)
return NULL;
evsel->core.attr.exclude_kernel = 1;
evsel->core.attr.exclude_guest = 1;
evsel->core.attr.exclude_hv = 1;
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
evsel->core.system_wide = system_wide;
evsel->no_aux_samples = true;
evsel->name = strdup("dummy:u");
evlist__add(evlist, evsel);
return evsel;
}
#ifdef HAVE_LIBTRACEEVENT
struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide)
{
struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0);
if (IS_ERR(evsel))
return evsel;
evsel__set_sample_bit(evsel, CPU);
evsel__set_sample_bit(evsel, TIME);
evsel->core.system_wide = system_wide;
evsel->no_aux_samples = true;
evlist__add(evlist, evsel);
return evsel;
}
#endif
int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
struct evsel *evsel, *n;
LIST_HEAD(head);
size_t i;
for (i = 0; i < nr_attrs; i++) {
evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
if (evsel == NULL)
goto out_delete_partial_list;
list_add_tail(&evsel->core.node, &head);
}
evlist__splice_list_tail(evlist, &head);
return 0;
out_delete_partial_list:
__evlist__for_each_entry_safe(&head, n, evsel)
evsel__delete(evsel);
return -1;
}
int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
size_t i;
for (i = 0; i < nr_attrs; i++)
event_attr_init(attrs + i);
return evlist__add_attrs(evlist, attrs, nr_attrs);
}
__weak int arch_evlist__add_default_attrs(struct evlist *evlist,
struct perf_event_attr *attrs,
size_t nr_attrs)
{
if (!nr_attrs)
return 0;
return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
}
struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
(int)evsel->core.attr.config == id)
return evsel;
}
return NULL;
}
struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
(strcmp(evsel->name, name) == 0))
return evsel;
}
return NULL;
}
#ifdef HAVE_LIBTRACEEVENT
int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
struct evsel *evsel = evsel__newtp(sys, name);
if (IS_ERR(evsel))
return -1;
evsel->handler = handler;
evlist__add(evlist, evsel);
return 0;
}
#endif
struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
{
struct evlist_cpu_iterator itr = {
.container = evlist,
.evsel = NULL,
.cpu_map_idx = 0,
.evlist_cpu_map_idx = 0,
.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
.cpu = (struct perf_cpu){ .cpu = -1},
.affinity = affinity,
};
if (evlist__empty(evlist)) {
/* Ensure the empty list doesn't iterate. */
itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr;
} else {
itr.evsel = evlist__first(evlist);
if (itr.affinity) {
itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
affinity__set(itr.affinity, itr.cpu.cpu);
itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
/*
* If this CPU isn't in the evsel's cpu map then advance
* through the list.
*/
if (itr.cpu_map_idx == -1)
evlist_cpu_iterator__next(&itr);
}
}
return itr;
}
void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
{
while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
evlist_cpu_itr->cpu_map_idx =
perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
evlist_cpu_itr->cpu);
if (evlist_cpu_itr->cpu_map_idx != -1)
return;
}
evlist_cpu_itr->evlist_cpu_map_idx++;
if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
evlist_cpu_itr->cpu =
perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
evlist_cpu_itr->evlist_cpu_map_idx);
if (evlist_cpu_itr->affinity)
affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);
evlist_cpu_itr->cpu_map_idx =
perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
evlist_cpu_itr->cpu);
/*
* If this CPU isn't in the evsel's cpu map then advance through
* the list.
*/
if (evlist_cpu_itr->cpu_map_idx == -1)
evlist_cpu_iterator__next(evlist_cpu_itr);
}
}
bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
{
return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
}
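/*
 * Illustrative sketch (not part of the original file): walking every
 * evsel/CPU pair with the iterator above, mirroring the pattern used by
 * __evlist__disable()/__evlist__enable() below.
 */
static void iterate_example(struct evlist *evlist, struct affinity *affinity)
{
	struct evlist_cpu_iterator itr;

	for (itr = evlist__cpu_begin(evlist, affinity);
	     !evlist_cpu_iterator__end(&itr);
	     evlist_cpu_iterator__next(&itr)) {
		/* itr.evsel / itr.cpu_map_idx identify one evsel on one CPU. */
		pr_debug2("evsel %s cpu idx %d\n",
			  evsel__name(itr.evsel), itr.cpu_map_idx);
	}
}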
static int evsel__strcmp(struct evsel *pos, char *evsel_name)
{
if (!evsel_name)
return 0;
if (evsel__is_dummy_event(pos))
return 1;
return !evsel__name_is(pos, evsel_name);
}
static int evlist__is_enabled(struct evlist *evlist)
{
struct evsel *pos;
evlist__for_each_entry(evlist, pos) {
if (!evsel__is_group_leader(pos) || !pos->core.fd)
continue;
/* If at least one event is enabled, evlist is enabled. */
if (!pos->disabled)
return true;
}
return false;
}
static void __evlist__disable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
{
struct evsel *pos;
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity saved_affinity, *affinity = NULL;
bool has_imm = false;
// See explanation in evlist__close()
if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
if (affinity__setup(&saved_affinity) < 0)
return;
affinity = &saved_affinity;
}
/* Disable 'immediate' events last */
for (int imm = 0; imm <= 1; imm++) {
evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
pos = evlist_cpu_itr.evsel;
if (evsel__strcmp(pos, evsel_name))
continue;
if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
continue;
if (excl_dummy && evsel__is_dummy_event(pos))
continue;
if (pos->immediate)
has_imm = true;
if (pos->immediate != imm)
continue;
evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
}
if (!has_imm)
break;
}
affinity__cleanup(affinity);
evlist__for_each_entry(evlist, pos) {
if (evsel__strcmp(pos, evsel_name))
continue;
if (!evsel__is_group_leader(pos) || !pos->core.fd)
continue;
if (excl_dummy && evsel__is_dummy_event(pos))
continue;
pos->disabled = true;
}
/*
* If we disabled only a single event, we need to check
* the enabled state of the evlist manually.
*/
if (evsel_name)
evlist->enabled = evlist__is_enabled(evlist);
else
evlist->enabled = false;
}
void evlist__disable(struct evlist *evlist)
{
__evlist__disable(evlist, NULL, false);
}
void evlist__disable_non_dummy(struct evlist *evlist)
{
__evlist__disable(evlist, NULL, true);
}
void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
{
__evlist__disable(evlist, evsel_name, false);
}
static void __evlist__enable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
{
struct evsel *pos;
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity saved_affinity, *affinity = NULL;
// See explanation in evlist__close()
if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
if (affinity__setup(&saved_affinity) < 0)
return;
affinity = &saved_affinity;
}
evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
pos = evlist_cpu_itr.evsel;
if (evsel__strcmp(pos, evsel_name))
continue;
if (!evsel__is_group_leader(pos) || !pos->core.fd)
continue;
if (excl_dummy && evsel__is_dummy_event(pos))
continue;
evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
}
affinity__cleanup(affinity);
evlist__for_each_entry(evlist, pos) {
if (evsel__strcmp(pos, evsel_name))
continue;
if (!evsel__is_group_leader(pos) || !pos->core.fd)
continue;
if (excl_dummy && evsel__is_dummy_event(pos))
continue;
pos->disabled = false;
}
/*
* Even a single event sets 'enabled' for the evlist,
* so the toggle can work properly and switch to the
* 'disabled' state.
*/
evlist->enabled = true;
}
void evlist__enable(struct evlist *evlist)
{
__evlist__enable(evlist, NULL, false);
}
void evlist__enable_non_dummy(struct evlist *evlist)
{
__evlist__enable(evlist, NULL, true);
}
void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
{
__evlist__enable(evlist, evsel_name, false);
}
void evlist__toggle_enable(struct evlist *evlist)
{
(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}
int evlist__add_pollfd(struct evlist *evlist, int fd)
{
return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}
#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
{
return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
fdarray_flag__nonfilterable |
fdarray_flag__non_perf_event);
}
#endif
int evlist__poll(struct evlist *evlist, int timeout)
{
return perf_evlist__poll(&evlist->core, timeout);
}
struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
struct hlist_head *head;
struct perf_sample_id *sid;
int hash;
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
head = &evlist->core.heads[hash];
hlist_for_each_entry(sid, head, node)
if (sid->id == id)
return sid;
return NULL;
}
struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
struct perf_sample_id *sid;
if (evlist->core.nr_entries == 1 || !id)
return evlist__first(evlist);
sid = evlist__id2sid(evlist, id);
if (sid)
return container_of(sid->evsel, struct evsel, core);
if (!evlist__sample_id_all(evlist))
return evlist__first(evlist);
return NULL;
}
struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
struct perf_sample_id *sid;
if (!id)
return NULL;
sid = evlist__id2sid(evlist, id);
if (sid)
return container_of(sid->evsel, struct evsel, core);
return NULL;
}
static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
const __u64 *array = event->sample.array;
ssize_t n;
n = (event->header.size - sizeof(event->header)) >> 3;
if (event->header.type == PERF_RECORD_SAMPLE) {
if (evlist->id_pos >= n)
return -1;
*id = array[evlist->id_pos];
} else {
if (evlist->is_pos > n)
return -1;
n -= evlist->is_pos;
*id = array[n];
}
return 0;
}
struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
{
struct evsel *first = evlist__first(evlist);
struct hlist_head *head;
struct perf_sample_id *sid;
int hash;
u64 id;
if (evlist->core.nr_entries == 1)
return first;
if (!first->core.attr.sample_id_all &&
event->header.type != PERF_RECORD_SAMPLE)
return first;
if (evlist__event2id(evlist, event, &id))
return NULL;
/* Synthesized events have an id of zero */
if (!id)
return first;
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
head = &evlist->core.heads[hash];
hlist_for_each_entry(sid, head, node) {
if (sid->id == id)
return container_of(sid->evsel, struct evsel, core);
}
return NULL;
}
static int evlist__set_paused(struct evlist *evlist, bool value)
{
int i;
if (!evlist->overwrite_mmap)
return 0;
for (i = 0; i < evlist->core.nr_mmaps; i++) {
int fd = evlist->overwrite_mmap[i].core.fd;
int err;
if (fd < 0)
continue;
err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
if (err)
return err;
}
return 0;
}
static int evlist__pause(struct evlist *evlist)
{
return evlist__set_paused(evlist, true);
}
static int evlist__resume(struct evlist *evlist)
{
return evlist__set_paused(evlist, false);
}
static void evlist__munmap_nofree(struct evlist *evlist)
{
int i;
if (evlist->mmap)
for (i = 0; i < evlist->core.nr_mmaps; i++)
perf_mmap__munmap(&evlist->mmap[i].core);
if (evlist->overwrite_mmap)
for (i = 0; i < evlist->core.nr_mmaps; i++)
perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}
void evlist__munmap(struct evlist *evlist)
{
evlist__munmap_nofree(evlist);
zfree(&evlist->mmap);
zfree(&evlist->overwrite_mmap);
}
static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
struct mmap *m = container_of(map, struct mmap, core);
mmap__munmap(m);
}
static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
bool overwrite)
{
int i;
struct mmap *map;
map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
if (!map)
return NULL;
for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct perf_mmap *prev = i ? &map[i - 1].core : NULL;
/*
* When the perf_mmap() call is made we grab one refcount, plus
* one extra to let perf_mmap__consume() get the last
* events after all real references (perf_mmap__get()) are
* dropped.
*
* Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
* thus does perf_mmap__get() on it.
*/
perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
}
return map;
}
static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
struct perf_evsel *_evsel,
struct perf_mmap_param *_mp,
int idx)
{
struct evlist *evlist = container_of(_evlist, struct evlist, core);
struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
struct evsel *evsel = container_of(_evsel, struct evsel, core);
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
}
static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
struct evlist *evlist = container_of(_evlist, struct evlist, core);
struct mmap *maps;
maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
if (!maps) {
maps = evlist__alloc_mmap(evlist, overwrite);
if (!maps)
return NULL;
if (overwrite) {
evlist->overwrite_mmap = maps;
if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
} else {
evlist->mmap = maps;
}
}
return &maps[idx].core;
}
static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
int output, struct perf_cpu cpu)
{
struct mmap *map = container_of(_map, struct mmap, core);
struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
return mmap__mmap(map, mp, output, cpu);
}
unsigned long perf_event_mlock_kb_in_pages(void)
{
unsigned long pages;
int max;
if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
/*
* Pick a once-upon-a-time good value, i.e. things look
* strange since we can't read the sysctl value, but let's not
* die yet...
*/
max = 512;
} else {
max -= (page_size / 1024);
}
pages = (max * 1024) / page_size;
if (!is_power_of_2(pages))
pages = rounddown_pow_of_two(pages);
return pages;
}
size_t evlist__mmap_size(unsigned long pages)
{
if (pages == UINT_MAX)
pages = perf_event_mlock_kb_in_pages();
else if (!is_power_of_2(pages))
return 0;
return (pages + 1) * page_size;
}
static long parse_pages_arg(const char *str, unsigned long min,
unsigned long max)
{
unsigned long pages, val;
static struct parse_tag tags[] = {
{ .tag = 'B', .mult = 1 },
{ .tag = 'K', .mult = 1 << 10 },
{ .tag = 'M', .mult = 1 << 20 },
{ .tag = 'G', .mult = 1 << 30 },
{ .tag = 0 },
};
if (str == NULL)
return -EINVAL;
val = parse_tag_value(str, tags);
if (val != (unsigned long) -1) {
/* we got file size value */
pages = PERF_ALIGN(val, page_size) / page_size;
} else {
/* we got pages count value */
char *eptr;
pages = strtoul(str, &eptr, 10);
if (*eptr != '\0')
return -EINVAL;
}
if (pages == 0 && min == 0) {
/* leave number of pages at 0 */
} else if (!is_power_of_2(pages)) {
char buf[100];
/* round pages up to next power of 2 */
pages = roundup_pow_of_two(pages);
if (!pages)
return -EINVAL;
unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
pr_info("rounding mmap pages size to %s (%lu pages)\n",
buf, pages);
}
if (pages > max)
return -EINVAL;
return pages;
}
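/*
 * Worked example for parse_pages_arg() above (illustrative, not part of the
 * original file; assumes 4096-byte pages): "-m 129K" gives val = 132096
 * bytes -> 33 pages, which is not a power of two, so it is rounded up to
 * 64 pages (256K) and a message is printed. "-m 16" is taken as a page
 * count and accepted as-is, since 16 is already a power of two.
 */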
int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
unsigned long max = UINT_MAX;
long pages;
if (max > SIZE_MAX / page_size)
max = SIZE_MAX / page_size;
pages = parse_pages_arg(str, 1, max);
if (pages < 0) {
pr_err("Invalid argument for --mmap_pages/-m\n");
return -1;
}
*mmap_pages = pages;
return 0;
}
int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
{
return __evlist__parse_mmap_pages(opt->value, str);
}
/**
* evlist__mmap_ex - Create mmaps to receive events.
* @evlist: list of events
* @pages: map length in pages
* @overwrite: overwrite older events?
* @auxtrace_pages: auxtrace map length in pages
* @auxtrace_overwrite: overwrite older auxtrace data?
*
* If @overwrite is %false the user needs to signal event consumption using
* perf_mmap__write_tail(). Using evlist__mmap_read() does this
* automatically.
*
* Similarly, if @auxtrace_overwrite is %false the user needs to signal data
* consumption using auxtrace_mmap__write_tail().
*
* Return: %0 on success, negative error code otherwise.
*/
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
int comp_level)
{
/*
* Delay setting mp.prot: set it before calling perf_mmap__mmap.
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
struct mmap_params mp = {
.nr_cblocks = nr_cblocks,
.affinity = affinity,
.flush = flush,
.comp_level = comp_level
};
struct perf_evlist_mmap_ops ops = {
.idx = perf_evlist__mmap_cb_idx,
.get = perf_evlist__mmap_cb_get,
.mmap = perf_evlist__mmap_cb_mmap,
};
evlist->core.mmap_len = evlist__mmap_size(pages);
pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
auxtrace_pages, auxtrace_overwrite);
return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}
int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}
int evlist__create_maps(struct evlist *evlist, struct target *target)
{
bool all_threads = (target->per_thread && target->system_wide);
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
/*
* If both '-a' and '--per-thread' are specified to perf record, perf record
* will override '--per-thread': target->per_thread = false and
* target->system_wide = true.
*
* If only '--per-thread' is specified to perf record,
* target->per_thread = true and target->system_wide = false.
*
* So target->per_thread && target->system_wide is false.
* For perf record, thread_map__new_str doesn't call
* thread_map__new_all_cpus. That keeps perf record's
* current behavior.
*
* perf stat, however, allows target->per_thread and
* target->system_wide to both be true, meaning system-wide
* per-thread data is collected. In that case thread_map__new_str
* calls thread_map__new_all_cpus to enumerate all threads.
*/
threads = thread_map__new_str(target->pid, target->tid, target->uid,
all_threads);
if (!threads)
return -1;
if (target__uses_dummy_map(target))
cpus = perf_cpu_map__dummy_new();
else
cpus = perf_cpu_map__new(target->cpu_list);
if (!cpus)
goto out_delete_threads;
evlist->core.has_user_cpus = !!target->cpu_list;
perf_evlist__set_maps(&evlist->core, cpus, threads);
/* as evlist now has references, put count here */
perf_cpu_map__put(cpus);
perf_thread_map__put(threads);
return 0;
out_delete_threads:
perf_thread_map__put(threads);
return -1;
}
int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
struct evsel *evsel;
int err = 0;
evlist__for_each_entry(evlist, evsel) {
/*
* filters only work for tracepoint events, which don't have a cpu limit.
* So evlist and evsel should always be the same.
*/
if (evsel->filter) {
err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
if (err) {
*err_evsel = evsel;
break;
}
}
/*
* non-tracepoint events can have BPF filters.
*/
if (!list_empty(&evsel->bpf_filters)) {
err = perf_bpf_filter__prepare(evsel);
if (err) {
*err_evsel = evsel;
break;
}
}
}
return err;
}
int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
struct evsel *evsel;
int err = 0;
if (filter == NULL)
return -1;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
continue;
err = evsel__set_filter(evsel, filter);
if (err)
break;
}
return err;
}
int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
struct evsel *evsel;
int err = 0;
if (filter == NULL)
return -1;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
continue;
err = evsel__append_tp_filter(evsel, filter);
if (err)
break;
}
return err;
}
char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
char *filter;
size_t i;
for (i = 0; i < npids; ++i) {
if (i == 0) {
if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
return NULL;
} else {
char *tmp;
if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
goto out_free;
free(filter);
filter = tmp;
}
}
return filter;
out_free:
free(filter);
return NULL;
}
int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
char *filter = asprintf__tp_filter_pids(npids, pids);
int ret = evlist__set_tp_filter(evlist, filter);
free(filter);
return ret;
}
int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
return evlist__set_tp_filter_pids(evlist, 1, &pid);
}
int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
char *filter = asprintf__tp_filter_pids(npids, pids);
int ret = evlist__append_tp_filter(evlist, filter);
free(filter);
return ret;
}
int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
return evlist__append_tp_filter_pids(evlist, 1, &pid);
}
bool evlist__valid_sample_type(struct evlist *evlist)
{
struct evsel *pos;
if (evlist->core.nr_entries == 1)
return true;
if (evlist->id_pos < 0 || evlist->is_pos < 0)
return false;
evlist__for_each_entry(evlist, pos) {
if (pos->id_pos != evlist->id_pos ||
pos->is_pos != evlist->is_pos)
return false;
}
return true;
}
u64 __evlist__combined_sample_type(struct evlist *evlist)
{
struct evsel *evsel;
if (evlist->combined_sample_type)
return evlist->combined_sample_type;
evlist__for_each_entry(evlist, evsel)
evlist->combined_sample_type |= evsel->core.attr.sample_type;
return evlist->combined_sample_type;
}
u64 evlist__combined_sample_type(struct evlist *evlist)
{
evlist->combined_sample_type = 0;
return __evlist__combined_sample_type(evlist);
}
u64 evlist__combined_branch_type(struct evlist *evlist)
{
struct evsel *evsel;
u64 branch_type = 0;
evlist__for_each_entry(evlist, evsel)
branch_type |= evsel->core.attr.branch_sample_type;
return branch_type;
}
bool evlist__valid_read_format(struct evlist *evlist)
{
struct evsel *first = evlist__first(evlist), *pos = first;
u64 read_format = first->core.attr.read_format;
u64 sample_type = first->core.attr.sample_type;
evlist__for_each_entry(evlist, pos) {
if (read_format != pos->core.attr.read_format) {
pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
read_format, (u64)pos->core.attr.read_format);
}
}
/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
if ((sample_type & PERF_SAMPLE_READ) &&
!(read_format & PERF_FORMAT_ID)) {
return false;
}
return true;
}
u16 evlist__id_hdr_size(struct evlist *evlist)
{
struct evsel *first = evlist__first(evlist);
return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0;
}
bool evlist__valid_sample_id_all(struct evlist *evlist)
{
struct evsel *first = evlist__first(evlist), *pos = first;
evlist__for_each_entry_continue(evlist, pos) {
if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
return false;
}
return true;
}
bool evlist__sample_id_all(struct evlist *evlist)
{
struct evsel *first = evlist__first(evlist);
return first->core.attr.sample_id_all;
}
void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
{
evlist->selected = evsel;
}
void evlist__close(struct evlist *evlist)
{
struct evsel *evsel;
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity affinity;
/*
* With perf record core.user_requested_cpus is usually NULL.
* Use the old method to handle this for now.
*/
if (!evlist->core.user_requested_cpus ||
cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
evlist__for_each_entry_reverse(evlist, evsel)
evsel__close(evsel);
return;
}
if (affinity__setup(&affinity) < 0)
return;
evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
evlist_cpu_itr.cpu_map_idx);
}
affinity__cleanup(&affinity);
evlist__for_each_entry_reverse(evlist, evsel) {
perf_evsel__free_fd(&evsel->core);
perf_evsel__free_id(&evsel->core);
}
perf_evlist__reset_id_hash(&evlist->core);
}
static int evlist__create_syswide_maps(struct evlist *evlist)
{
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
/*
* Try reading /sys/devices/system/cpu/online to get
* an all cpus map.
*
* FIXME: -ENOMEM is the best we can do here, the cpu_map
* code needs an overhaul to properly forward the
* error, and we may not want to do that fallback to a
* default cpu identity map :-\
*/
cpus = perf_cpu_map__new(NULL);
if (!cpus)
goto out;
threads = perf_thread_map__new_dummy();
if (!threads)
goto out_put;
perf_evlist__set_maps(&evlist->core, cpus, threads);
perf_thread_map__put(threads);
out_put:
perf_cpu_map__put(cpus);
out:
return -ENOMEM;
}
int evlist__open(struct evlist *evlist)
{
struct evsel *evsel;
int err;
/*
* Default: one fd per CPU, all threads, aka systemwide
* as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
*/
if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
err = evlist__create_syswide_maps(evlist);
if (err < 0)
goto out_err;
}
evlist__update_id_pos(evlist);
evlist__for_each_entry(evlist, evsel) {
err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
if (err < 0)
goto out_err;
}
return 0;
out_err:
evlist__close(evlist);
errno = -err;
return err;
}
int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
int child_ready_pipe[2], go_pipe[2];
char bf;
if (pipe(child_ready_pipe) < 0) {
perror("failed to create 'ready' pipe");
return -1;
}
if (pipe(go_pipe) < 0) {
perror("failed to create 'go' pipe");
goto out_close_ready_pipe;
}
evlist->workload.pid = fork();
if (evlist->workload.pid < 0) {
perror("failed to fork");
goto out_close_pipes;
}
if (!evlist->workload.pid) {
int ret;
if (pipe_output)
dup2(2, 1);
signal(SIGTERM, SIG_DFL);
close(child_ready_pipe[0]);
close(go_pipe[1]);
fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
/*
* Change the name of this process so as not to confuse --exclude-perf users
* who see 'perf' in the window up to the execvp() and think that
* perf samples are not being excluded.
*/
prctl(PR_SET_NAME, "perf-exec");
/*
* Tell the parent we're ready to go
*/
close(child_ready_pipe[1]);
/*
* Wait until the parent tells us to go.
*/
ret = read(go_pipe[0], &bf, 1);
/*
* The parent will ask for the execvp() to be performed by
* writing exactly one byte, in workload.cork_fd, usually via
* evlist__start_workload().
*
* For cancelling the workload without actually running it,
* the parent will just close workload.cork_fd, without writing
* anything, i.e. read will return zero and we just exit()
* here.
*/
if (ret != 1) {
if (ret == -1)
perror("unable to read pipe");
exit(ret);
}
execvp(argv[0], (char **)argv);
if (exec_error) {
union sigval val;
val.sival_int = errno;
if (sigqueue(getppid(), SIGUSR1, val))
perror(argv[0]);
} else
perror(argv[0]);
exit(-1);
}
if (exec_error) {
struct sigaction act = {
.sa_flags = SA_SIGINFO,
.sa_sigaction = exec_error,
};
sigaction(SIGUSR1, &act, NULL);
}
if (target__none(target)) {
if (evlist->core.threads == NULL) {
fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
__func__, __LINE__);
goto out_close_pipes;
}
perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
}
close(child_ready_pipe[1]);
close(go_pipe[0]);
/*
* wait for child to settle
*/
if (read(child_ready_pipe[0], &bf, 1) == -1) {
perror("unable to read pipe");
goto out_close_pipes;
}
fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
evlist->workload.cork_fd = go_pipe[1];
close(child_ready_pipe[0]);
return 0;
out_close_pipes:
close(go_pipe[0]);
close(go_pipe[1]);
out_close_ready_pipe:
close(child_ready_pipe[0]);
close(child_ready_pipe[1]);
return -1;
}
int evlist__start_workload(struct evlist *evlist)
{
if (evlist->workload.cork_fd > 0) {
char bf = 0;
int ret;
/*
* Remove the cork, let it rip!
*/
ret = write(evlist->workload.cork_fd, &bf, 1);
if (ret < 0)
perror("unable to write to pipe");
close(evlist->workload.cork_fd);
return ret;
}
return 0;
}
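/*
 * Illustrative sketch (not part of the original file) of the typical
 * record-style lifecycle built from the helpers above: create an evlist,
 * fork a corked workload, open and mmap the events, then uncork it.
 * Error handling is trimmed and the "sleep 1" argv is an arbitrary example.
 */
static int workload_example(struct target *target)
{
	const char *argv[] = { "sleep", "1", NULL };
	struct evlist *evlist = evlist__new_default();
	int err = -1;

	if (evlist == NULL)
		return -ENOMEM;
	if (evlist__create_maps(evlist, target) < 0)
		goto out;
	/* Fork the workload; the child blocks in read() until the cork is pulled. */
	if (evlist__prepare_workload(evlist, target, argv, false, NULL) < 0)
		goto out;
	if (evlist__open(evlist) < 0)
		goto out;
	if (evlist__mmap(evlist, UINT_MAX) < 0)	/* UINT_MAX selects the default size */
		goto out;
	evlist__enable(evlist);
	if (evlist__start_workload(evlist) < 0)	/* pulls the cork: one byte written */
		goto out;
	err = 0;
out:
	evlist__delete(evlist);	/* closes, munmaps and frees everything */
	return err;
}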
int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
struct evsel *evsel = evlist__event2evsel(evlist, event);
int ret;
if (!evsel)
return -EFAULT;
ret = evsel__parse_sample(evsel, event, sample);
if (ret)
return ret;
if (perf_guest && sample->id) {
struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id);
if (sid) {
sample->machine_pid = sid->machine_pid;
sample->vcpu = sid->vcpu.cpu;
}
}
return 0;
}
int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
{
struct evsel *evsel = evlist__event2evsel(evlist, event);
if (!evsel)
return -EFAULT;
return evsel__parse_sample_timestamp(evsel, event, timestamp);
}
int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
int printed, value;
char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
switch (err) {
case EACCES:
case EPERM:
printed = scnprintf(buf, size,
"Error:\t%s.\n"
"Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
value = perf_event_paranoid();
printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
if (value >= 2) {
printed += scnprintf(buf + printed, size - printed,
"For your workloads it needs to be <= 1\nHint:\t");
}
printed += scnprintf(buf + printed, size - printed,
"For system wide tracing it needs to be set to -1.\n");
printed += scnprintf(buf + printed, size - printed,
"Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
"Hint:\tThe current value is %d.", value);
break;
case EINVAL: {
struct evsel *first = evlist__first(evlist);
int max_freq;
if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
goto out_default;
if (first->core.attr.sample_freq < (u64)max_freq)
goto out_default;
printed = scnprintf(buf, size,
"Error:\t%s.\n"
"Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
"Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
emsg, max_freq, first->core.attr.sample_freq);
break;
}
default:
out_default:
scnprintf(buf, size, "%s", emsg);
break;
}
return 0;
}
int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
switch (err) {
case EPERM:
sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
printed += scnprintf(buf + printed, size - printed,
"Error:\t%s.\n"
"Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
"Hint:\tTried using %zd kB.\n",
emsg, pages_max_per_user, pages_attempted);
if (pages_attempted >= pages_max_per_user) {
printed += scnprintf(buf + printed, size - printed,
"Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
pages_max_per_user + pages_attempted);
}
printed += scnprintf(buf + printed, size - printed,
"Hint:\tTry using a smaller -m/--mmap-pages value.");
break;
default:
scnprintf(buf, size, "%s", emsg);
break;
}
return 0;
}
void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
{
struct evsel *evsel, *n;
LIST_HEAD(move);
if (move_evsel == evlist__first(evlist))
return;
evlist__for_each_entry_safe(evlist, n, evsel) {
if (evsel__leader(evsel) == evsel__leader(move_evsel))
list_move_tail(&evsel->core.node, &move);
}
list_splice(&move, &evlist->core.entries);
}
struct evsel *evlist__get_tracking_event(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->tracking)
return evsel;
}
return evlist__first(evlist);
}
void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
{
struct evsel *evsel;
if (tracking_evsel->tracking)
return;
evlist__for_each_entry(evlist, evsel) {
if (evsel != tracking_evsel)
evsel->tracking = false;
}
tracking_evsel->tracking = true;
}
struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (!evsel->name)
continue;
if (evsel__name_is(evsel, str))
return evsel;
}
return NULL;
}
void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
{
enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
enum action {
NONE,
PAUSE,
RESUME,
} action = NONE;
if (!evlist->overwrite_mmap)
return;
switch (old_state) {
case BKW_MMAP_NOTREADY: {
if (state != BKW_MMAP_RUNNING)
goto state_err;
break;
}
case BKW_MMAP_RUNNING: {
if (state != BKW_MMAP_DATA_PENDING)
goto state_err;
action = PAUSE;
break;
}
case BKW_MMAP_DATA_PENDING: {
if (state != BKW_MMAP_EMPTY)
goto state_err;
break;
}
case BKW_MMAP_EMPTY: {
if (state != BKW_MMAP_RUNNING)
goto state_err;
action = RESUME;
break;
}
default:
WARN_ONCE(1, "Shouldn't get there\n");
}
evlist->bkw_mmap_state = state;
switch (action) {
case PAUSE:
evlist__pause(evlist);
break;
case RESUME:
evlist__resume(evlist);
break;
case NONE:
default:
break;
}
state_err:
return;
}
bool evlist__exclude_kernel(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (!evsel->core.attr.exclude_kernel)
return false;
}
return true;
}
/*
* Events in the data file are not collected in groups, but we still want
* the group display. Set the artificial group and set the leader's
* forced_leader flag to notify the display code.
*/
void evlist__force_leader(struct evlist *evlist)
{
if (evlist__nr_groups(evlist) == 0) {
struct evsel *leader = evlist__first(evlist);
evlist__set_leader(evlist);
leader->forced_leader = true;
}
}
struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
{
struct evsel *c2, *leader;
bool is_open = true;
leader = evsel__leader(evsel);
pr_debug("Weak group for %s/%d failed\n",
leader->name, leader->core.nr_members);
/*
* for_each_group_member doesn't work here because it doesn't
* include the first entry.
*/
evlist__for_each_entry(evsel_list, c2) {
if (c2 == evsel)
is_open = false;
if (evsel__has_leader(c2, leader)) {
if (is_open && close)
perf_evsel__close(&c2->core);
/*
* We want to close all members of the group and reopen
* them. Some events, like Intel topdown, require being
* in a group and so keep these in the group.
*/
evsel__remove_from_group(c2, leader);
/*
* Set this for all former members of the group
* to indicate they get reopened.
*/
c2->reset_group = true;
}
}
/* Reset the leader count if all entries were removed. */
if (leader->core.nr_members == 1)
leader->core.nr_members = 0;
return leader;
}
static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
char *s, *p;
int ret = 0, fd;
if (strncmp(str, "fifo:", 5))
return -EINVAL;
str += 5;
if (!*str || *str == ',')
return -EINVAL;
s = strdup(str);
if (!s)
return -ENOMEM;
p = strchr(s, ',');
if (p)
*p = '\0';
/*
* O_RDWR avoids POLLHUPs which is necessary to allow the other
* end of a FIFO to be repeatedly opened and closed.
*/
fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
if (fd < 0) {
pr_err("Failed to open '%s'\n", s);
ret = -errno;
goto out_free;
}
*ctl_fd = fd;
*ctl_fd_close = true;
if (p && *++p) {
/* O_RDWR | O_NONBLOCK means the other end need not be open */
fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
if (fd < 0) {
pr_err("Failed to open '%s'\n", p);
ret = -errno;
goto out_free;
}
*ctl_fd_ack = fd;
}
out_free:
free(s);
return ret;
}
int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
char *comma = NULL, *endptr = NULL;
*ctl_fd_close = false;
if (strncmp(str, "fd:", 3))
return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);
*ctl_fd = strtoul(&str[3], &endptr, 0);
if (endptr == &str[3])
return -EINVAL;
comma = strchr(str, ',');
if (comma) {
if (endptr != comma)
return -EINVAL;
*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
if (endptr == comma + 1 || *endptr != '\0')
return -EINVAL;
}
return 0;
}
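/*
 * Examples of control specifications accepted by evlist__parse_control()
 * above (illustrative, not part of the original file; the paths are made up):
 *
 *   "fd:10"                             - commands read from fd 10, no ack fd
 *   "fd:10,11"                          - commands on fd 10, acks written to fd 11
 *   "fifo:/tmp/perf.ctl"                - control FIFO opened O_RDWR | O_NONBLOCK
 *   "fifo:/tmp/perf.ctl,/tmp/perf.ack"  - separate ack FIFO
 *
 * Anything not starting with "fd:" is handed to evlist__parse_control_fifo(),
 * which requires the "fifo:" prefix, opens the files itself and sets
 * *ctl_fd_close so the caller later closes them via evlist__close_control().
 */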
void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
{
if (*ctl_fd_close) {
*ctl_fd_close = false;
close(ctl_fd);
if (ctl_fd_ack >= 0)
close(ctl_fd_ack);
}
}
int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
{
if (fd == -1) {
pr_debug("Control descriptor is not initialized\n");
return 0;
}
evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
fdarray_flag__nonfilterable |
fdarray_flag__non_perf_event);
if (evlist->ctl_fd.pos < 0) {
evlist->ctl_fd.pos = -1;
pr_err("Failed to add ctl fd entry: %m\n");
return -1;
}
evlist->ctl_fd.fd = fd;
evlist->ctl_fd.ack = ack;
return 0;
}
bool evlist__ctlfd_initialized(struct evlist *evlist)
{
return evlist->ctl_fd.pos >= 0;
}
int evlist__finalize_ctlfd(struct evlist *evlist)
{
struct pollfd *entries = evlist->core.pollfd.entries;
if (!evlist__ctlfd_initialized(evlist))
return 0;
entries[evlist->ctl_fd.pos].fd = -1;
entries[evlist->ctl_fd.pos].events = 0;
entries[evlist->ctl_fd.pos].revents = 0;
evlist->ctl_fd.pos = -1;
evlist->ctl_fd.ack = -1;
evlist->ctl_fd.fd = -1;
return 0;
}
static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
char *cmd_data, size_t data_size)
{
int err;
char c;
size_t bytes_read = 0;
*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
memset(cmd_data, 0, data_size);
data_size--;
do {
err = read(evlist->ctl_fd.fd, &c, 1);
if (err > 0) {
if (c == '\n' || c == '\0')
break;
cmd_data[bytes_read++] = c;
if (bytes_read == data_size)
break;
continue;
} else if (err == -1) {
if (errno == EINTR)
continue;
if (errno == EAGAIN || errno == EWOULDBLOCK)
err = 0;
else
pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
}
break;
} while (1);
pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");
if (bytes_read > 0) {
if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
(sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
*cmd = EVLIST_CTL_CMD_ENABLE;
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
(sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
*cmd = EVLIST_CTL_CMD_DISABLE;
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
(sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
*cmd = EVLIST_CTL_CMD_SNAPSHOT;
pr_debug("is snapshot\n");
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG,
(sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) {
*cmd = EVLIST_CTL_CMD_EVLIST;
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG,
(sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) {
*cmd = EVLIST_CTL_CMD_STOP;
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG,
(sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) {
*cmd = EVLIST_CTL_CMD_PING;
}
}
return bytes_read ? (int)bytes_read : err;
}
int evlist__ctlfd_ack(struct evlist *evlist)
{
int err;
if (evlist->ctl_fd.ack == -1)
return 0;
err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
sizeof(EVLIST_CTL_CMD_ACK_TAG));
if (err == -1)
pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);
return err;
}
static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg)
{
char *data = cmd_data + cmd_size;
/* no argument */
if (!*data)
return 0;
/* there's an argument */
if (*data == ' ') {
*arg = data + 1;
return 1;
}
/* malformed */
return -1;
}
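/*
 * Illustrative behaviour of get_cmd_arg(): with cmd_data "evlist -v"
 * and cmd_size == strlen("evlist"), *arg is set to "-v" and 1 is
 * returned; plain "evlist" returns 0 (no argument); "evlistX" returns
 * -1 (malformed).
 */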
static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable)
{
struct evsel *evsel;
char *name;
int err;
err = get_cmd_arg(cmd_data,
enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 :
sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1,
&name);
if (err < 0) {
pr_info("failed: wrong command\n");
return -1;
}
if (err) {
evsel = evlist__find_evsel_by_str(evlist, name);
if (evsel) {
if (enable)
evlist__enable_evsel(evlist, name);
else
evlist__disable_evsel(evlist, name);
pr_info("Event %s %s\n", evsel->name,
enable ? "enabled" : "disabled");
} else {
pr_info("failed: can't find '%s' event\n", name);
}
} else {
if (enable) {
evlist__enable(evlist);
pr_info(EVLIST_ENABLED_MSG);
} else {
evlist__disable(evlist);
pr_info(EVLIST_DISABLED_MSG);
}
}
return 0;
}
static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data)
{
struct perf_attr_details details = { .verbose = false, };
struct evsel *evsel;
char *arg;
int err;
err = get_cmd_arg(cmd_data,
sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1,
&arg);
if (err < 0) {
pr_info("failed: wrong command\n");
return -1;
}
if (err) {
if (!strcmp(arg, "-v")) {
details.verbose = true;
} else if (!strcmp(arg, "-g")) {
details.event_group = true;
} else if (!strcmp(arg, "-F")) {
details.freq = true;
} else {
pr_info("failed: wrong command\n");
return -1;
}
}
evlist__for_each_entry(evlist, evsel)
evsel__fprintf(evsel, &details, stderr);
return 0;
}
int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
{
int err = 0;
char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
int ctlfd_pos = evlist->ctl_fd.pos;
struct pollfd *entries = evlist->core.pollfd.entries;
if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
return 0;
if (entries[ctlfd_pos].revents & POLLIN) {
err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
EVLIST_CTL_CMD_MAX_LEN);
if (err > 0) {
switch (*cmd) {
case EVLIST_CTL_CMD_ENABLE:
case EVLIST_CTL_CMD_DISABLE:
err = evlist__ctlfd_enable(evlist, cmd_data,
*cmd == EVLIST_CTL_CMD_ENABLE);
break;
case EVLIST_CTL_CMD_EVLIST:
err = evlist__ctlfd_list(evlist, cmd_data);
break;
case EVLIST_CTL_CMD_SNAPSHOT:
case EVLIST_CTL_CMD_STOP:
case EVLIST_CTL_CMD_PING:
break;
case EVLIST_CTL_CMD_ACK:
case EVLIST_CTL_CMD_UNSUPPORTED:
default:
pr_debug("ctlfd: unsupported %d\n", *cmd);
break;
}
if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
*cmd == EVLIST_CTL_CMD_SNAPSHOT))
evlist__ctlfd_ack(evlist);
}
}
if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
evlist__finalize_ctlfd(evlist);
else
entries[ctlfd_pos].revents = 0;
return err;
}
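/*
 * A minimal usage sketch (assumption: the EVLIST_CTL_CMD_*_TAG strings
 * are the lower-case command names defined alongside them in evlist.h):
 * an external controller writes e.g. "disable\n" or "enable cycles\n"
 * to the control fd; evlist__ctlfd_process() decodes it via
 * evlist__ctlfd_recv() and, unless the command is ack, unsupported or
 * snapshot, replies on the ack fd through evlist__ctlfd_ack().
 */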
/**
* struct event_enable_time - perf record -D/--delay single time range.
* @start: start of time range to enable events in milliseconds
* @end: end of time range to enable events in milliseconds
*
* N.B. this structure is also accessed as an array of int.
*/
struct event_enable_time {
int start;
int end;
};
static int parse_event_enable_time(const char *str, struct event_enable_time *range, bool first)
{
const char *fmt = first ? "%u - %u %n" : " , %u - %u %n";
int ret, start, end, n;
ret = sscanf(str, fmt, &start, &end, &n);
if (ret != 2 || end <= start)
return -EINVAL;
if (range) {
range->start = start;
range->end = end;
}
return n;
}
static ssize_t parse_event_enable_times(const char *str, struct event_enable_time *range)
{
int incr = !!range;
bool first = true;
ssize_t ret, cnt;
for (cnt = 0; *str; cnt++) {
ret = parse_event_enable_time(str, range, first);
if (ret < 0)
return ret;
/* Check no overlap */
if (!first && range && range->start <= range[-1].end)
return -EINVAL;
str += ret;
range += incr;
first = false;
}
return cnt;
}
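/*
 * Example (derived from the sscanf formats above): the string
 * "10-20,30-40" parses into two ranges, i.e. enable events between
 * 10ms and 20ms and again between 30ms and 40ms after start. Ranges
 * must be ascending and non-overlapping or -EINVAL is returned.
 */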
/**
* struct event_enable_timer - control structure for perf record -D/--delay.
* @evlist: event list
* @times: time ranges that events are enabled (N.B. this is also accessed as an
* array of int)
* @times_cnt: number of time ranges
* @timerfd: timer file descriptor
* @pollfd_pos: position in @evlist array of file descriptors to poll (fdarray)
* @times_step: current position in ((int *)@times)[],
* refer to event_enable_timer__process()
*
* Note, this structure is only used when there are time ranges, not when there
* is only an initial delay.
*/
struct event_enable_timer {
struct evlist *evlist;
struct event_enable_time *times;
size_t times_cnt;
int timerfd;
int pollfd_pos;
size_t times_step;
};
static int str_to_delay(const char *str)
{
char *endptr;
long d;
d = strtol(str, &endptr, 10);
if (*endptr || d > INT_MAX || d < -1)
return 0;
return d;
}
int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *opts,
const char *str, int unset)
{
enum fdarray_flags flags = fdarray_flag__nonfilterable | fdarray_flag__non_perf_event;
struct event_enable_timer *eet;
ssize_t times_cnt;
ssize_t ret;
int err;
if (unset)
return 0;
opts->target.initial_delay = str_to_delay(str);
if (opts->target.initial_delay)
return 0;
ret = parse_event_enable_times(str, NULL);
if (ret < 0)
return ret;
times_cnt = ret;
if (times_cnt == 0)
return -EINVAL;
eet = zalloc(sizeof(*eet));
if (!eet)
return -ENOMEM;
eet->times = calloc(times_cnt, sizeof(*eet->times));
if (!eet->times) {
err = -ENOMEM;
goto free_eet;
}
if (parse_event_enable_times(str, eet->times) != times_cnt) {
err = -EINVAL;
goto free_eet_times;
}
eet->times_cnt = times_cnt;
eet->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
if (eet->timerfd == -1) {
err = -errno;
pr_err("timerfd_create failed: %s\n", strerror(errno));
goto free_eet_times;
}
eet->pollfd_pos = perf_evlist__add_pollfd(&evlist->core, eet->timerfd, NULL, POLLIN, flags);
if (eet->pollfd_pos < 0) {
err = eet->pollfd_pos;
goto close_timerfd;
}
eet->evlist = evlist;
evlist->eet = eet;
opts->target.initial_delay = eet->times[0].start;
return 0;
close_timerfd:
close(eet->timerfd);
free_eet_times:
zfree(&eet->times);
free_eet:
free(eet);
return err;
}
static int event_enable_timer__set_timer(struct event_enable_timer *eet, int ms)
{
struct itimerspec its = {
.it_value.tv_sec = ms / MSEC_PER_SEC,
.it_value.tv_nsec = (ms % MSEC_PER_SEC) * NSEC_PER_MSEC,
};
int err = 0;
if (timerfd_settime(eet->timerfd, 0, &its, NULL) < 0) {
err = -errno;
pr_err("timerfd_settime failed: %s\n", strerror(errno));
}
return err;
}
int event_enable_timer__start(struct event_enable_timer *eet)
{
int ms;
if (!eet)
return 0;
ms = eet->times[0].end - eet->times[0].start;
eet->times_step = 1;
return event_enable_timer__set_timer(eet, ms);
}
int event_enable_timer__process(struct event_enable_timer *eet)
{
struct pollfd *entries;
short revents;
if (!eet)
return 0;
entries = eet->evlist->core.pollfd.entries;
revents = entries[eet->pollfd_pos].revents;
entries[eet->pollfd_pos].revents = 0;
if (revents & POLLIN) {
size_t step = eet->times_step;
size_t pos = step / 2;
if (step & 1) {
evlist__disable_non_dummy(eet->evlist);
pr_info(EVLIST_DISABLED_MSG);
if (pos >= eet->times_cnt - 1) {
/* Disarm timer */
event_enable_timer__set_timer(eet, 0);
return 1; /* Stop */
}
} else {
evlist__enable_non_dummy(eet->evlist);
pr_info(EVLIST_ENABLED_MSG);
}
step += 1;
pos = step / 2;
if (pos < eet->times_cnt) {
int *times = (int *)eet->times; /* Accessing 'times' as array of int */
int ms = times[step] - times[step - 1];
eet->times_step = step;
return event_enable_timer__set_timer(eet, ms);
}
}
return 0;
}
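/*
 * Worked example (illustrative): with times = {{10,20},{30,40}} the
 * flat int view is [10, 20, 30, 40]. times_step == 1 means events are
 * currently enabled inside range 0; when the timer fires they are
 * disabled and the timer is re-armed for times[2] - times[1] = 10ms,
 * the gap before range 1 starts. Odd steps disable, even steps enable.
 */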
void event_enable_timer__exit(struct event_enable_timer **ep)
{
if (!ep || !*ep)
return;
zfree(&(*ep)->times);
zfree(ep);
}
struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.idx == idx)
return evsel;
}
return NULL;
}
int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
{
struct evsel *evsel;
int printed = 0;
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_dummy_event(evsel))
continue;
if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
} else {
printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
break;
}
}
return printed;
}
void evlist__check_mem_load_aux(struct evlist *evlist)
{
struct evsel *leader, *evsel, *pos;
/*
* For some platforms, the 'mem-loads' event is required to be used
* together with 'mem-loads-aux' within a group and 'mem-loads-aux'
* must be the group leader. Now we disable this group before reporting
* because 'mem-loads-aux' is just an auxiliary event. It doesn't carry
* any valid memory load information.
*/
evlist__for_each_entry(evlist, evsel) {
leader = evsel__leader(evsel);
if (leader == evsel)
continue;
if (leader->name && strstr(leader->name, "mem-loads-aux")) {
for_each_group_evsel(pos, leader) {
evsel__set_leader(pos, pos);
pos->core.nr_members = 0;
}
}
}
}
/**
* evlist__warn_user_requested_cpus() - Check each evsel against requested CPUs
* and warn if the user CPU list is inapplicable for the event's PMU's
* CPUs. Non-core PMUs list a CPU in sysfs, but this may be overridden by a
* user-requested CPU and so any online CPU is applicable. Core PMUs handle
* events on the CPUs in their list and otherwise the event isn't supported.
* @evlist: The list of events being checked.
* @cpu_list: The user provided list of CPUs.
*/
void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list)
{
struct perf_cpu_map *user_requested_cpus;
struct evsel *pos;
if (!cpu_list)
return;
user_requested_cpus = perf_cpu_map__new(cpu_list);
if (!user_requested_cpus)
return;
evlist__for_each_entry(evlist, pos) {
struct perf_cpu_map *intersect, *to_test;
const struct perf_pmu *pmu = evsel__find_pmu(pos);
to_test = pmu && pmu->is_core ? pmu->cpus : cpu_map__online();
intersect = perf_cpu_map__intersect(to_test, user_requested_cpus);
if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
char buf[128];
cpu_map__snprint(to_test, buf, sizeof(buf));
pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos));
}
perf_cpu_map__put(intersect);
}
perf_cpu_map__put(user_requested_cpus);
}
| linux-master | tools/perf/util/evlist.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Compare and figure out the top N hottest streams
* Copyright (c) 2020, Intel Corporation.
* Author: Jin Yao
*/
#include <inttypes.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "hist.h"
#include "sort.h"
#include "stream.h"
#include "evlist.h"
static void evsel_streams__delete(struct evsel_streams *es, int nr_evsel)
{
for (int i = 0; i < nr_evsel; i++)
zfree(&es[i].streams);
free(es);
}
void evlist_streams__delete(struct evlist_streams *els)
{
evsel_streams__delete(els->ev_streams, els->nr_evsel);
free(els);
}
static struct evlist_streams *evlist_streams__new(int nr_evsel,
int nr_streams_max)
{
struct evlist_streams *els;
struct evsel_streams *es;
els = zalloc(sizeof(*els));
if (!els)
return NULL;
es = calloc(nr_evsel, sizeof(struct evsel_streams));
if (!es) {
free(els);
return NULL;
}
for (int i = 0; i < nr_evsel; i++) {
struct evsel_streams *s = &es[i];
s->streams = calloc(nr_streams_max, sizeof(struct stream));
if (!s->streams)
goto err;
s->nr_streams_max = nr_streams_max;
s->evsel_idx = -1;
}
els->ev_streams = es;
els->nr_evsel = nr_evsel;
return els;
err:
evsel_streams__delete(es, nr_evsel);
return NULL;
}
/*
* The cnodes with a high hit count are the hot callchains.
*/
static void evsel_streams__set_hot_cnode(struct evsel_streams *es,
struct callchain_node *cnode)
{
int i, idx = 0;
u64 hit;
if (es->nr_streams < es->nr_streams_max) {
i = es->nr_streams;
es->streams[i].cnode = cnode;
es->nr_streams++;
return;
}
/*
* Considering the small number of hot streams kept, just use a simple
* linear scan to find the cnode with the smallest hit count and replace it.
*/
hit = (es->streams[0].cnode)->hit;
for (i = 1; i < es->nr_streams; i++) {
if ((es->streams[i].cnode)->hit < hit) {
hit = (es->streams[i].cnode)->hit;
idx = i;
}
}
if (cnode->hit > hit)
es->streams[idx].cnode = cnode;
}
static void update_hot_callchain(struct hist_entry *he,
struct evsel_streams *es)
{
struct rb_root *root = &he->sorted_chain;
struct rb_node *rb_node = rb_first(root);
struct callchain_node *cnode;
while (rb_node) {
cnode = rb_entry(rb_node, struct callchain_node, rb_node);
evsel_streams__set_hot_cnode(es, cnode);
rb_node = rb_next(rb_node);
}
}
static void init_hot_callchain(struct hists *hists, struct evsel_streams *es)
{
struct rb_node *next = rb_first_cached(&hists->entries);
while (next) {
struct hist_entry *he;
he = rb_entry(next, struct hist_entry, rb_node);
update_hot_callchain(he, es);
next = rb_next(&he->rb_node);
}
es->streams_hits = callchain_total_hits(hists);
}
static int evlist__init_callchain_streams(struct evlist *evlist,
struct evlist_streams *els)
{
struct evsel_streams *es = els->ev_streams;
struct evsel *pos;
int i = 0;
BUG_ON(els->nr_evsel < evlist->core.nr_entries);
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
hists__output_resort(hists, NULL);
init_hot_callchain(hists, &es[i]);
es[i].evsel_idx = pos->core.idx;
i++;
}
return 0;
}
struct evlist_streams *evlist__create_streams(struct evlist *evlist,
int nr_streams_max)
{
int nr_evsel = evlist->core.nr_entries, ret = -1;
struct evlist_streams *els = evlist_streams__new(nr_evsel,
nr_streams_max);
if (!els)
return NULL;
ret = evlist__init_callchain_streams(evlist, els);
if (ret) {
evlist_streams__delete(els);
return NULL;
}
return els;
}
struct evsel_streams *evsel_streams__entry(struct evlist_streams *els,
int evsel_idx)
{
struct evsel_streams *es = els->ev_streams;
for (int i = 0; i < els->nr_evsel; i++) {
if (es[i].evsel_idx == evsel_idx)
return &es[i];
}
return NULL;
}
static struct stream *stream__callchain_match(struct stream *base_stream,
struct evsel_streams *es_pair)
{
for (int i = 0; i < es_pair->nr_streams; i++) {
struct stream *pair_stream = &es_pair->streams[i];
if (callchain_cnode_matched(base_stream->cnode,
pair_stream->cnode)) {
return pair_stream;
}
}
return NULL;
}
static struct stream *stream__match(struct stream *base_stream,
struct evsel_streams *es_pair)
{
return stream__callchain_match(base_stream, es_pair);
}
static void stream__link(struct stream *base_stream, struct stream *pair_stream)
{
base_stream->pair_cnode = pair_stream->cnode;
pair_stream->pair_cnode = base_stream->cnode;
}
void evsel_streams__match(struct evsel_streams *es_base,
struct evsel_streams *es_pair)
{
for (int i = 0; i < es_base->nr_streams; i++) {
struct stream *base_stream = &es_base->streams[i];
struct stream *pair_stream;
pair_stream = stream__match(base_stream, es_pair);
if (pair_stream)
stream__link(base_stream, pair_stream);
}
}
static void print_callchain_pair(struct stream *base_stream, int idx,
struct evsel_streams *es_base,
struct evsel_streams *es_pair)
{
struct callchain_node *base_cnode = base_stream->cnode;
struct callchain_node *pair_cnode = base_stream->pair_cnode;
struct callchain_list *base_chain, *pair_chain;
char buf1[512], buf2[512], cbuf1[256], cbuf2[256];
char *s1, *s2;
double pct;
printf("\nhot chain pair %d:\n", idx);
pct = (double)base_cnode->hit / (double)es_base->streams_hits;
scnprintf(buf1, sizeof(buf1), "cycles: %ld, hits: %.2f%%",
callchain_avg_cycles(base_cnode), pct * 100.0);
pct = (double)pair_cnode->hit / (double)es_pair->streams_hits;
scnprintf(buf2, sizeof(buf2), "cycles: %ld, hits: %.2f%%",
callchain_avg_cycles(pair_cnode), pct * 100.0);
printf("%35s\t%35s\n", buf1, buf2);
printf("%35s\t%35s\n",
"---------------------------",
"--------------------------");
pair_chain = list_first_entry(&pair_cnode->val,
struct callchain_list,
list);
list_for_each_entry(base_chain, &base_cnode->val, list) {
if (&pair_chain->list == &pair_cnode->val)
return;
s1 = callchain_list__sym_name(base_chain, cbuf1, sizeof(cbuf1),
false);
s2 = callchain_list__sym_name(pair_chain, cbuf2, sizeof(cbuf2),
false);
scnprintf(buf1, sizeof(buf1), "%35s\t%35s", s1, s2);
printf("%s\n", buf1);
pair_chain = list_next_entry(pair_chain, list);
}
}
static void print_stream_callchain(struct stream *stream, int idx,
struct evsel_streams *es, bool pair)
{
struct callchain_node *cnode = stream->cnode;
struct callchain_list *chain;
char buf[512], cbuf[256], *s;
double pct;
printf("\nhot chain %d:\n", idx);
pct = (double)cnode->hit / (double)es->streams_hits;
scnprintf(buf, sizeof(buf), "cycles: %ld, hits: %.2f%%",
callchain_avg_cycles(cnode), pct * 100.0);
if (pair) {
printf("%35s\t%35s\n", "", buf);
printf("%35s\t%35s\n",
"", "--------------------------");
} else {
printf("%35s\n", buf);
printf("%35s\n", "--------------------------");
}
list_for_each_entry(chain, &cnode->val, list) {
s = callchain_list__sym_name(chain, cbuf, sizeof(cbuf), false);
if (pair)
scnprintf(buf, sizeof(buf), "%35s\t%35s", "", s);
else
scnprintf(buf, sizeof(buf), "%35s", s);
printf("%s\n", buf);
}
}
static void callchain_streams_report(struct evsel_streams *es_base,
struct evsel_streams *es_pair)
{
struct stream *base_stream;
int i, idx = 0;
printf("[ Matched hot streams ]\n");
for (i = 0; i < es_base->nr_streams; i++) {
base_stream = &es_base->streams[i];
if (base_stream->pair_cnode) {
print_callchain_pair(base_stream, ++idx,
es_base, es_pair);
}
}
idx = 0;
printf("\n[ Hot streams in old perf data only ]\n");
for (i = 0; i < es_base->nr_streams; i++) {
base_stream = &es_base->streams[i];
if (!base_stream->pair_cnode) {
print_stream_callchain(base_stream, ++idx,
es_base, false);
}
}
idx = 0;
printf("\n[ Hot streams in new perf data only ]\n");
for (i = 0; i < es_pair->nr_streams; i++) {
base_stream = &es_pair->streams[i];
if (!base_stream->pair_cnode) {
print_stream_callchain(base_stream, ++idx,
es_pair, true);
}
}
}
void evsel_streams__report(struct evsel_streams *es_base,
struct evsel_streams *es_pair)
{
return callchain_streams_report(es_base, es_pair);
}
| linux-master | tools/perf/util/stream.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
*
* Parts came from builtin-{top,stat,record}.c, see those files for further
* copyright notes.
*/
#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
#include "bpf_counter.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
#include "event.h"
#include "evsel.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "record.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
#include "util.h"
#include "util/hashmap.h"
#include "off_cpu.h"
#include "pmu.h"
#include "pmus.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include "util/bpf-filter.h"
#include <internal/xyarray.h>
#include <internal/lib.h>
#include <internal/threadmap.h>
#include <linux/ctype.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
struct perf_missing_features perf_missing_features;
static clockid_t clockid;
static const char *const perf_tool_event__tool_names[PERF_TOOL_MAX] = {
NULL,
"duration_time",
"user_time",
"system_time",
};
const char *perf_tool_event__to_str(enum perf_tool_event ev)
{
if (ev > PERF_TOOL_NONE && ev < PERF_TOOL_MAX)
return perf_tool_event__tool_names[ev];
return NULL;
}
enum perf_tool_event perf_tool_event__from_str(const char *str)
{
int i;
perf_tool_event__for_each_event(i) {
if (!strcmp(str, perf_tool_event__tool_names[i]))
return i;
}
return PERF_TOOL_NONE;
}
static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
return 0;
}
void __weak test_attr__ready(void) { }
static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}
static struct {
size_t size;
int (*init)(struct evsel *evsel);
void (*fini)(struct evsel *evsel);
} perf_evsel__object = {
.size = sizeof(struct evsel),
.init = evsel__no_extra_init,
.fini = evsel__no_extra_fini,
};
int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel),
void (*fini)(struct evsel *evsel))
{
if (object_size == 0)
goto set_methods;
if (perf_evsel__object.size > object_size)
return -EINVAL;
perf_evsel__object.size = object_size;
set_methods:
if (init != NULL)
perf_evsel__object.init = init;
if (fini != NULL)
perf_evsel__object.fini = fini;
return 0;
}
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
int __evsel__sample_size(u64 sample_type)
{
u64 mask = sample_type & PERF_SAMPLE_MASK;
int size = 0;
int i;
for (i = 0; i < 64; i++) {
if (mask & (1ULL << i))
size++;
}
size *= sizeof(u64);
return size;
}
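/*
 * Example (illustrative, assuming PERF_SAMPLE_MASK covers the
 * fixed-size sample fields): a sample_type of PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME sets three bits, so the fixed
 * part of each sample is 3 * sizeof(u64) == 24 bytes.
 */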
/**
* __perf_evsel__calc_id_pos - calculate id_pos.
* @sample_type: sample type
*
* This function returns the position of the event id (PERF_SAMPLE_ID or
* PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
* perf_record_sample.
*/
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
int idx = 0;
if (sample_type & PERF_SAMPLE_IDENTIFIER)
return 0;
if (!(sample_type & PERF_SAMPLE_ID))
return -1;
if (sample_type & PERF_SAMPLE_IP)
idx += 1;
if (sample_type & PERF_SAMPLE_TID)
idx += 1;
if (sample_type & PERF_SAMPLE_TIME)
idx += 1;
if (sample_type & PERF_SAMPLE_ADDR)
idx += 1;
return idx;
}
/**
* __perf_evsel__calc_is_pos - calculate is_pos.
* @sample_type: sample type
*
* This function returns the position (counting backwards) of the event id
* (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
* sample_id_all is used there is an id sample appended to non-sample events.
*/
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
int idx = 1;
if (sample_type & PERF_SAMPLE_IDENTIFIER)
return 1;
if (!(sample_type & PERF_SAMPLE_ID))
return -1;
if (sample_type & PERF_SAMPLE_CPU)
idx += 1;
if (sample_type & PERF_SAMPLE_STREAM_ID)
idx += 1;
return idx;
}
void evsel__calc_id_pos(struct evsel *evsel)
{
evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
}
void __evsel__set_sample_bit(struct evsel *evsel,
enum perf_event_sample_format bit)
{
if (!(evsel->core.attr.sample_type & bit)) {
evsel->core.attr.sample_type |= bit;
evsel->sample_size += sizeof(u64);
evsel__calc_id_pos(evsel);
}
}
void __evsel__reset_sample_bit(struct evsel *evsel,
enum perf_event_sample_format bit)
{
if (evsel->core.attr.sample_type & bit) {
evsel->core.attr.sample_type &= ~bit;
evsel->sample_size -= sizeof(u64);
evsel__calc_id_pos(evsel);
}
}
void evsel__set_sample_id(struct evsel *evsel,
bool can_sample_identifier)
{
if (can_sample_identifier) {
evsel__reset_sample_bit(evsel, ID);
evsel__set_sample_bit(evsel, IDENTIFIER);
} else {
evsel__set_sample_bit(evsel, ID);
}
evsel->core.attr.read_format |= PERF_FORMAT_ID;
}
/**
* evsel__is_function_event - Return whether given evsel is a function
* trace event
*
* @evsel - evsel selector to be tested
*
* Return %true if event is function trace event
*/
bool evsel__is_function_event(struct evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"
return evsel->name &&
!strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
#undef FUNCTION_EVENT
}
void evsel__init(struct evsel *evsel,
struct perf_event_attr *attr, int idx)
{
perf_evsel__init(&evsel->core, attr, idx);
evsel->tracking = !idx;
evsel->unit = strdup("");
evsel->scale = 1.0;
evsel->max_events = ULONG_MAX;
evsel->evlist = NULL;
evsel->bpf_obj = NULL;
evsel->bpf_fd = -1;
INIT_LIST_HEAD(&evsel->config_terms);
INIT_LIST_HEAD(&evsel->bpf_counter_list);
INIT_LIST_HEAD(&evsel->bpf_filters);
perf_evsel__object.init(evsel);
evsel->sample_size = __evsel__sample_size(attr->sample_type);
evsel__calc_id_pos(evsel);
evsel->cmdline_group_boundary = false;
evsel->metric_events = NULL;
evsel->per_pkg_mask = NULL;
evsel->collect_stat = false;
evsel->pmu_name = NULL;
evsel->group_pmu_name = NULL;
evsel->skippable = false;
}
struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
{
struct evsel *evsel = zalloc(perf_evsel__object.size);
if (!evsel)
return NULL;
evsel__init(evsel, attr, idx);
if (evsel__is_bpf_output(evsel) && !attr->sample_type) {
evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
evsel->core.attr.sample_period = 1;
}
if (evsel__is_clock(evsel)) {
free((char *)evsel->unit);
evsel->unit = strdup("msec");
evsel->scale = 1e-6;
}
return evsel;
}
int copy_config_terms(struct list_head *dst, struct list_head *src)
{
struct evsel_config_term *pos, *tmp;
list_for_each_entry(pos, src, list) {
tmp = malloc(sizeof(*tmp));
if (tmp == NULL)
return -ENOMEM;
*tmp = *pos;
if (tmp->free_str) {
tmp->val.str = strdup(pos->val.str);
if (tmp->val.str == NULL) {
free(tmp);
return -ENOMEM;
}
}
list_add_tail(&tmp->list, dst);
}
return 0;
}
static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
{
return copy_config_terms(&dst->config_terms, &src->config_terms);
}
/**
* evsel__clone - create a new evsel copied from @orig
* @orig: original evsel
*
* The assumption is that @orig is not configured nor opened yet.
* So we only care about the attributes that can be set while it's parsed.
*/
struct evsel *evsel__clone(struct evsel *orig)
{
struct evsel *evsel;
BUG_ON(orig->core.fd);
BUG_ON(orig->counts);
BUG_ON(orig->priv);
BUG_ON(orig->per_pkg_mask);
/* cannot handle BPF objects for now */
if (orig->bpf_obj)
return NULL;
evsel = evsel__new(&orig->core.attr);
if (evsel == NULL)
return NULL;
evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
evsel->core.threads = perf_thread_map__get(orig->core.threads);
evsel->core.nr_members = orig->core.nr_members;
evsel->core.system_wide = orig->core.system_wide;
evsel->core.requires_cpu = orig->core.requires_cpu;
evsel->core.is_pmu_core = orig->core.is_pmu_core;
if (orig->name) {
evsel->name = strdup(orig->name);
if (evsel->name == NULL)
goto out_err;
}
if (orig->group_name) {
evsel->group_name = strdup(orig->group_name);
if (evsel->group_name == NULL)
goto out_err;
}
if (orig->pmu_name) {
evsel->pmu_name = strdup(orig->pmu_name);
if (evsel->pmu_name == NULL)
goto out_err;
}
if (orig->group_pmu_name) {
evsel->group_pmu_name = strdup(orig->group_pmu_name);
if (evsel->group_pmu_name == NULL)
goto out_err;
}
if (orig->filter) {
evsel->filter = strdup(orig->filter);
if (evsel->filter == NULL)
goto out_err;
}
if (orig->metric_id) {
evsel->metric_id = strdup(orig->metric_id);
if (evsel->metric_id == NULL)
goto out_err;
}
evsel->cgrp = cgroup__get(orig->cgrp);
#ifdef HAVE_LIBTRACEEVENT
evsel->tp_format = orig->tp_format;
#endif
evsel->handler = orig->handler;
evsel->core.leader = orig->core.leader;
evsel->max_events = orig->max_events;
evsel->tool_event = orig->tool_event;
free((char *)evsel->unit);
evsel->unit = strdup(orig->unit);
if (evsel->unit == NULL)
goto out_err;
evsel->scale = orig->scale;
evsel->snapshot = orig->snapshot;
evsel->per_pkg = orig->per_pkg;
evsel->percore = orig->percore;
evsel->precise_max = orig->precise_max;
evsel->is_libpfm_event = orig->is_libpfm_event;
evsel->exclude_GH = orig->exclude_GH;
evsel->sample_read = orig->sample_read;
evsel->auto_merge_stats = orig->auto_merge_stats;
evsel->collect_stat = orig->collect_stat;
evsel->weak_group = orig->weak_group;
evsel->use_config_name = orig->use_config_name;
evsel->pmu = orig->pmu;
if (evsel__copy_config_terms(evsel, orig) < 0)
goto out_err;
return evsel;
out_err:
evsel__delete(evsel);
return NULL;
}
/*
* Returns a pointer with the error encoded via the <linux/err.h> interface.
*/
#ifdef HAVE_LIBTRACEEVENT
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
{
struct evsel *evsel = zalloc(perf_evsel__object.size);
int err = -ENOMEM;
if (evsel == NULL) {
goto out_err;
} else {
struct perf_event_attr attr = {
.type = PERF_TYPE_TRACEPOINT,
.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
};
if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
goto out_free;
evsel->tp_format = trace_event__tp_format(sys, name);
if (IS_ERR(evsel->tp_format)) {
err = PTR_ERR(evsel->tp_format);
goto out_free;
}
event_attr_init(&attr);
attr.config = evsel->tp_format->id;
attr.sample_period = 1;
evsel__init(evsel, &attr, idx);
}
return evsel;
out_free:
zfree(&evsel->name);
free(evsel);
out_err:
return ERR_PTR(err);
}
#endif
const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = {
"cycles",
"instructions",
"cache-references",
"cache-misses",
"branches",
"branch-misses",
"bus-cycles",
"stalled-cycles-frontend",
"stalled-cycles-backend",
"ref-cycles",
};
char *evsel__bpf_counter_events;
bool evsel__match_bpf_counter_events(const char *name)
{
int name_len;
bool match;
char *ptr;
if (!evsel__bpf_counter_events)
return false;
ptr = strstr(evsel__bpf_counter_events, name);
name_len = strlen(name);
/* check name matches a full token in evsel__bpf_counter_events */
match = (ptr != NULL) &&
((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));
return match;
}
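/*
 * Example (illustrative): with evsel__bpf_counter_events set to
 * "cycles,instructions", the names "cycles" and "instructions" match,
 * while "instruction" or "cpu-cycles" do not, because only full
 * comma-separated tokens are accepted.
 */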
static const char *__evsel__hw_name(u64 config)
{
if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
return evsel__hw_names[config];
return "unknown-hardware";
}
static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
{
int colon = 0, r = 0;
struct perf_event_attr *attr = &evsel->core.attr;
bool exclude_guest_default = false;
#define MOD_PRINT(context, mod) do { \
if (!attr->exclude_##context) { \
if (!colon) colon = ++r; \
r += scnprintf(bf + r, size - r, "%c", mod); \
} } while(0)
if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
MOD_PRINT(kernel, 'k');
MOD_PRINT(user, 'u');
MOD_PRINT(hv, 'h');
exclude_guest_default = true;
}
if (attr->precise_ip) {
if (!colon)
colon = ++r;
r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
exclude_guest_default = true;
}
if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
MOD_PRINT(host, 'H');
MOD_PRINT(guest, 'G');
}
#undef MOD_PRINT
if (colon)
bf[colon - 1] = ':';
return r;
}
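/*
 * Example (illustrative): for an event with exclude_kernel == 1 and
 * exclude_hv == 1 (and exclude_host/exclude_guest clear), the suffix
 * appended is ":u"; with precise_ip == 2 as well, it becomes ":upp".
 */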
int __weak arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
return scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
}
static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
int r = arch_evsel__hw_name(evsel, bf, size);
return r + evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *const evsel__sw_names[PERF_COUNT_SW_MAX] = {
"cpu-clock",
"task-clock",
"page-faults",
"context-switches",
"cpu-migrations",
"minor-faults",
"major-faults",
"alignment-faults",
"emulation-faults",
"dummy",
};
static const char *__evsel__sw_name(u64 config)
{
if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config])
return evsel__sw_names[config];
return "unknown-software";
}
static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
{
int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config));
return r + evsel__add_modifiers(evsel, bf + r, size - r);
}
static int evsel__tool_name(enum perf_tool_event ev, char *bf, size_t size)
{
return scnprintf(bf, size, "%s", perf_tool_event__to_str(ev));
}
static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
int r;
r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
if (type & HW_BREAKPOINT_R)
r += scnprintf(bf + r, size - r, "r");
if (type & HW_BREAKPOINT_W)
r += scnprintf(bf + r, size - r, "w");
if (type & HW_BREAKPOINT_X)
r += scnprintf(bf + r, size - r, "x");
return r;
}
static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
{
struct perf_event_attr *attr = &evsel->core.attr;
int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
return r + evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
{ "L1-dcache", "l1-d", "l1d", "L1-data", },
{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
{ "LLC", "L2", },
{ "dTLB", "d-tlb", "Data-TLB", },
{ "iTLB", "i-tlb", "Instruction-TLB", },
{ "branch", "branches", "bpu", "btb", "bpc", },
{ "node", },
};
const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
{ "load", "loads", "read", },
{ "store", "stores", "write", },
{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
};
const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
{ "refs", "Reference", "ops", "access", },
{ "misses", "miss", },
};
#define C(x) PERF_COUNT_HW_CACHE_##x
#define CACHE_READ (1 << C(OP_READ))
#define CACHE_WRITE (1 << C(OP_WRITE))
#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
#define COP(x) (1 << x)
/*
* cache operation stat
* L1I : Read and prefetch only
* ITLB and BPU : Read-only
*/
static const unsigned long evsel__hw_cache_stat[C(MAX)] = {
[C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
[C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
[C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
[C(ITLB)] = (CACHE_READ),
[C(BPU)] = (CACHE_READ),
[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
bool evsel__is_cache_op_valid(u8 type, u8 op)
{
if (evsel__hw_cache_stat[type] & COP(op))
return true; /* valid */
else
return false; /* invalid */
}
int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size)
{
if (result) {
return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0],
evsel__hw_cache_op[op][0],
evsel__hw_cache_result[result][0]);
}
return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0],
evsel__hw_cache_op[op][1]);
}
static int __evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
u8 op, result, type = (config >> 0) & 0xff;
const char *err = "unknown-ext-hardware-cache-type";
if (type >= PERF_COUNT_HW_CACHE_MAX)
goto out_err;
op = (config >> 8) & 0xff;
err = "unknown-ext-hardware-cache-op";
if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
goto out_err;
result = (config >> 16) & 0xff;
err = "unknown-ext-hardware-cache-result";
if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
goto out_err;
err = "invalid-cache";
if (!evsel__is_cache_op_valid(type, op))
goto out_err;
return __evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
return scnprintf(bf, size, "%s", err);
}
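/*
 * Example (illustrative): config 0x10000 decodes as type 0 (L1-dcache),
 * op 0 (load) and result 1 (misses), producing "L1-dcache-load-misses".
 */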
static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
{
int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size);
return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}
static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
{
int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *evsel__name(struct evsel *evsel)
{
char bf[128];
if (!evsel)
goto out_unknown;
if (evsel->name)
return evsel->name;
switch (evsel->core.attr.type) {
case PERF_TYPE_RAW:
evsel__raw_name(evsel, bf, sizeof(bf));
break;
case PERF_TYPE_HARDWARE:
evsel__hw_name(evsel, bf, sizeof(bf));
break;
case PERF_TYPE_HW_CACHE:
evsel__hw_cache_name(evsel, bf, sizeof(bf));
break;
case PERF_TYPE_SOFTWARE:
if (evsel__is_tool(evsel))
evsel__tool_name(evsel->tool_event, bf, sizeof(bf));
else
evsel__sw_name(evsel, bf, sizeof(bf));
break;
case PERF_TYPE_TRACEPOINT:
scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
break;
case PERF_TYPE_BREAKPOINT:
evsel__bp_name(evsel, bf, sizeof(bf));
break;
default:
scnprintf(bf, sizeof(bf), "unknown attr type: %d",
evsel->core.attr.type);
break;
}
evsel->name = strdup(bf);
if (evsel->name)
return evsel->name;
out_unknown:
return "unknown";
}
bool evsel__name_is(struct evsel *evsel, const char *name)
{
return !strcmp(evsel__name(evsel), name);
}
const char *evsel__metric_id(const struct evsel *evsel)
{
if (evsel->metric_id)
return evsel->metric_id;
if (evsel__is_tool(evsel))
return perf_tool_event__to_str(evsel->tool_event);
return "unknown";
}
const char *evsel__group_name(struct evsel *evsel)
{
return evsel->group_name ?: "anon group";
}
/*
* Returns the group details for the specified leader,
* with the following rules.
*
* For record -e '{cycles,instructions}'
* 'anon group { cycles:u, instructions:u }'
*
* For record -e 'cycles,instructions' and report --group
* 'cycles:u, instructions:u'
*/
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
int ret = 0;
struct evsel *pos;
const char *group_name = evsel__group_name(evsel);
if (!evsel->forced_leader)
ret = scnprintf(buf, size, "%s { ", group_name);
ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel));
for_each_group_member(pos, evsel)
ret += scnprintf(buf + ret, size - ret, ", %s", evsel__name(pos));
if (!evsel->forced_leader)
ret += scnprintf(buf + ret, size - ret, " }");
return ret;
}
static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
struct callchain_param *param)
{
bool function = evsel__is_function_event(evsel);
struct perf_event_attr *attr = &evsel->core.attr;
const char *arch = perf_env__arch(evsel__env(evsel));
evsel__set_sample_bit(evsel, CALLCHAIN);
attr->sample_max_stack = param->max_stack;
if (opts->kernel_callchains)
attr->exclude_callchain_user = 1;
if (opts->user_callchains)
attr->exclude_callchain_kernel = 1;
if (param->record_mode == CALLCHAIN_LBR) {
if (!opts->branch_stack) {
if (attr->exclude_user) {
pr_warning("LBR callstack option is only available "
"to get user callchain information. "
"Falling back to framepointers.\n");
} else {
evsel__set_sample_bit(evsel, BRANCH_STACK);
attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
PERF_SAMPLE_BRANCH_CALL_STACK |
PERF_SAMPLE_BRANCH_NO_CYCLES |
PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_HW_INDEX;
}
} else
pr_warning("Cannot use LBR callstack with branch stack. "
"Falling back to framepointers.\n");
}
if (param->record_mode == CALLCHAIN_DWARF) {
if (!function) {
evsel__set_sample_bit(evsel, REGS_USER);
evsel__set_sample_bit(evsel, STACK_USER);
if (opts->sample_user_regs &&
DWARF_MINIMAL_REGS(arch) != arch__user_reg_mask()) {
attr->sample_regs_user |= DWARF_MINIMAL_REGS(arch);
pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
"specifying a subset with --user-regs may render DWARF unwinding unreliable, "
"so the minimal registers set (IP, SP) is explicitly forced.\n");
} else {
attr->sample_regs_user |= arch__user_reg_mask();
}
attr->sample_stack_user = param->dump_size;
attr->exclude_callchain_user = 1;
} else {
pr_info("Cannot use DWARF unwind for function trace event,"
" falling back to framepointers.\n");
}
}
if (function) {
pr_info("Disabling user space callchains for function trace event.\n");
attr->exclude_callchain_user = 1;
}
}
void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
struct callchain_param *param)
{
if (param->enabled)
return __evsel__config_callchain(evsel, opts, param);
}
static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param)
{
struct perf_event_attr *attr = &evsel->core.attr;
evsel__reset_sample_bit(evsel, CALLCHAIN);
if (param->record_mode == CALLCHAIN_LBR) {
evsel__reset_sample_bit(evsel, BRANCH_STACK);
attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
PERF_SAMPLE_BRANCH_CALL_STACK |
PERF_SAMPLE_BRANCH_HW_INDEX);
}
if (param->record_mode == CALLCHAIN_DWARF) {
evsel__reset_sample_bit(evsel, REGS_USER);
evsel__reset_sample_bit(evsel, STACK_USER);
}
}
static void evsel__apply_config_terms(struct evsel *evsel,
struct record_opts *opts, bool track)
{
struct evsel_config_term *term;
struct list_head *config_terms = &evsel->config_terms;
struct perf_event_attr *attr = &evsel->core.attr;
/* callgraph default */
struct callchain_param param = {
.record_mode = callchain_param.record_mode,
};
u32 dump_size = 0;
int max_stack = 0;
const char *callgraph_buf = NULL;
list_for_each_entry(term, config_terms, list) {
switch (term->type) {
case EVSEL__CONFIG_TERM_PERIOD:
if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
attr->sample_period = term->val.period;
attr->freq = 0;
evsel__reset_sample_bit(evsel, PERIOD);
}
break;
case EVSEL__CONFIG_TERM_FREQ:
if (!(term->weak && opts->user_freq != UINT_MAX)) {
attr->sample_freq = term->val.freq;
attr->freq = 1;
evsel__set_sample_bit(evsel, PERIOD);
}
break;
case EVSEL__CONFIG_TERM_TIME:
if (term->val.time)
evsel__set_sample_bit(evsel, TIME);
else
evsel__reset_sample_bit(evsel, TIME);
break;
case EVSEL__CONFIG_TERM_CALLGRAPH:
callgraph_buf = term->val.str;
break;
case EVSEL__CONFIG_TERM_BRANCH:
if (term->val.str && strcmp(term->val.str, "no")) {
evsel__set_sample_bit(evsel, BRANCH_STACK);
parse_branch_str(term->val.str,
&attr->branch_sample_type);
} else
evsel__reset_sample_bit(evsel, BRANCH_STACK);
break;
case EVSEL__CONFIG_TERM_STACK_USER:
dump_size = term->val.stack_user;
break;
case EVSEL__CONFIG_TERM_MAX_STACK:
max_stack = term->val.max_stack;
break;
case EVSEL__CONFIG_TERM_MAX_EVENTS:
evsel->max_events = term->val.max_events;
break;
case EVSEL__CONFIG_TERM_INHERIT:
/*
* attr->inherit should have already been set by
* evsel__config. If the user explicitly set
* inherit using config terms, override the global
* opts->no_inherit setting.
*/
attr->inherit = term->val.inherit ? 1 : 0;
break;
case EVSEL__CONFIG_TERM_OVERWRITE:
attr->write_backward = term->val.overwrite ? 1 : 0;
break;
case EVSEL__CONFIG_TERM_DRV_CFG:
break;
case EVSEL__CONFIG_TERM_PERCORE:
break;
case EVSEL__CONFIG_TERM_AUX_OUTPUT:
attr->aux_output = term->val.aux_output ? 1 : 0;
break;
case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
/* Already applied by auxtrace */
break;
case EVSEL__CONFIG_TERM_CFG_CHG:
break;
default:
break;
}
}
/* User explicitly set per-event callgraph, clear the old setting and reset. */
if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
bool sample_address = false;
if (max_stack) {
param.max_stack = max_stack;
if (callgraph_buf == NULL)
callgraph_buf = "fp";
}
/* parse callgraph parameters */
if (callgraph_buf != NULL) {
if (!strcmp(callgraph_buf, "no")) {
param.enabled = false;
param.record_mode = CALLCHAIN_NONE;
} else {
param.enabled = true;
if (parse_callchain_record(callgraph_buf, ¶m)) {
pr_err("per-event callgraph setting for %s failed. "
"Apply callgraph global setting for it\n",
evsel->name);
return;
}
if (param.record_mode == CALLCHAIN_DWARF)
sample_address = true;
}
}
if (dump_size > 0) {
dump_size = round_up(dump_size, sizeof(u64));
param.dump_size = dump_size;
}
/* If global callgraph set, clear it */
if (callchain_param.enabled)
evsel__reset_callgraph(evsel, &callchain_param);
/* set per-event callgraph */
if (param.enabled) {
if (sample_address) {
evsel__set_sample_bit(evsel, ADDR);
evsel__set_sample_bit(evsel, DATA_SRC);
evsel->core.attr.mmap_data = track;
}
evsel__config_callchain(evsel, opts, ¶m);
}
}
}
struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
{
struct evsel_config_term *term, *found_term = NULL;
list_for_each_entry(term, &evsel->config_terms, list) {
if (term->type == type)
found_term = term;
}
return found_term;
}
void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
{
evsel__set_sample_bit(evsel, WEIGHT);
}
void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
struct perf_event_attr *attr __maybe_unused)
{
}
static void evsel__set_default_freq_period(struct record_opts *opts,
struct perf_event_attr *attr)
{
if (opts->freq) {
attr->freq = 1;
attr->sample_freq = opts->freq;
} else {
attr->sample_period = opts->default_interval;
}
}
static bool evsel__is_offcpu_event(struct evsel *evsel)
{
return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT);
}
/*
* The enable_on_exec/disabled value strategy:
*
* 1) For any type of traced program:
* - all independent events and group leaders are disabled
* - all group members are enabled
*
* Group members are ruled by group leaders. They need to
* be enabled, because the group scheduling relies on that.
*
* 2) For traced programs executed by perf:
* - all independent events and group leaders have
* enable_on_exec set
* - we don't specifically enable or disable any event during
* the record command
*
* Independent events and group leaders are initially disabled
* and get enabled by exec. Group members are ruled by group
* leaders as stated in 1).
*
* 3) For traced programs attached by perf (pid/tid):
* - we specifically enable or disable all events during
* the record command
*
* When attaching events to an already running traced program we
* enable/disable events specifically, as there's no
* initial traced exec call.
*/
void evsel__config(struct evsel *evsel, struct record_opts *opts,
struct callchain_param *callchain)
{
struct evsel *leader = evsel__leader(evsel);
struct perf_event_attr *attr = &evsel->core.attr;
int track = evsel->tracking;
bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
attr->inherit = !opts->no_inherit;
attr->write_backward = opts->overwrite ? 1 : 0;
attr->read_format = PERF_FORMAT_LOST;
evsel__set_sample_bit(evsel, IP);
evsel__set_sample_bit(evsel, TID);
if (evsel->sample_read) {
evsel__set_sample_bit(evsel, READ);
/*
* We need ID even in the case of a single event, because
* PERF_SAMPLE_READ processes ID-specific data.
*/
evsel__set_sample_id(evsel, false);
/*
* Apply group format only if we belong to group
* with more than one members.
*/
if (leader->core.nr_members > 1) {
attr->read_format |= PERF_FORMAT_GROUP;
attr->inherit = 0;
}
}
/*
* We give some events a default interval. But keep it a weak
* assumption, overridable by the user.
*/
if ((evsel->is_libpfm_event && !attr->sample_period) ||
(!evsel->is_libpfm_event && (!attr->sample_period ||
opts->user_freq != UINT_MAX ||
opts->user_interval != ULLONG_MAX)))
evsel__set_default_freq_period(opts, attr);
/*
* If attr->freq was set (here or earlier), ask for period
* to be sampled.
*/
if (attr->freq)
evsel__set_sample_bit(evsel, PERIOD);
if (opts->no_samples)
attr->sample_freq = 0;
if (opts->inherit_stat) {
evsel->core.attr.read_format |=
PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING |
PERF_FORMAT_ID;
attr->inherit_stat = 1;
}
if (opts->sample_address) {
evsel__set_sample_bit(evsel, ADDR);
attr->mmap_data = track;
}
/*
* We don't allow user space callchains for the function trace
* event, due to issues with page faults while tracing the page
* fault handler and its overall tricky nature.
*/
if (evsel__is_function_event(evsel))
evsel->core.attr.exclude_callchain_user = 1;
if (callchain && callchain->enabled && !evsel->no_aux_samples)
evsel__config_callchain(evsel, opts, callchain);
if (opts->sample_intr_regs && !evsel->no_aux_samples &&
!evsel__is_dummy_event(evsel)) {
attr->sample_regs_intr = opts->sample_intr_regs;
evsel__set_sample_bit(evsel, REGS_INTR);
}
if (opts->sample_user_regs && !evsel->no_aux_samples &&
!evsel__is_dummy_event(evsel)) {
attr->sample_regs_user |= opts->sample_user_regs;
evsel__set_sample_bit(evsel, REGS_USER);
}
if (target__has_cpu(&opts->target) || opts->sample_cpu)
evsel__set_sample_bit(evsel, CPU);
/*
* When the user explicitly disabled time don't force it here.
*/
if (opts->sample_time &&
(!perf_missing_features.sample_id_all &&
(!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
opts->sample_time_set)))
evsel__set_sample_bit(evsel, TIME);
if (opts->raw_samples && !evsel->no_aux_samples) {
evsel__set_sample_bit(evsel, TIME);
evsel__set_sample_bit(evsel, RAW);
evsel__set_sample_bit(evsel, CPU);
}
if (opts->sample_address)
evsel__set_sample_bit(evsel, DATA_SRC);
if (opts->sample_phys_addr)
evsel__set_sample_bit(evsel, PHYS_ADDR);
if (opts->no_buffering) {
attr->watermark = 0;
attr->wakeup_events = 1;
}
if (opts->branch_stack && !evsel->no_aux_samples) {
evsel__set_sample_bit(evsel, BRANCH_STACK);
attr->branch_sample_type = opts->branch_stack;
}
if (opts->sample_weight)
arch_evsel__set_sample_weight(evsel);
attr->task = track;
attr->mmap = track;
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
attr->build_id = track && opts->build_id;
/*
* ksymbol is tracked separately with text poke because it needs to be
* system wide and enabled immediately.
*/
if (!opts->text_poke)
attr->ksymbol = track && !perf_missing_features.ksymbol;
attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf;
if (opts->record_namespaces)
attr->namespaces = track;
if (opts->record_cgroup) {
attr->cgroup = track && !perf_missing_features.cgroup;
evsel__set_sample_bit(evsel, CGROUP);
}
if (opts->sample_data_page_size)
evsel__set_sample_bit(evsel, DATA_PAGE_SIZE);
if (opts->sample_code_page_size)
evsel__set_sample_bit(evsel, CODE_PAGE_SIZE);
if (opts->record_switch_events)
attr->context_switch = track;
if (opts->sample_transaction)
evsel__set_sample_bit(evsel, TRANSACTION);
if (opts->running_time) {
evsel->core.attr.read_format |=
PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
}
/*
* XXX see the function comment above
*
* Disabling only independent events or group leaders,
* keeping group members enabled.
*/
if (evsel__is_group_leader(evsel))
attr->disabled = 1;
/*
* Setting enable_on_exec for independent events and
* group leaders for traced programs executed by perf.
*/
if (target__none(&opts->target) && evsel__is_group_leader(evsel) &&
!opts->target.initial_delay)
attr->enable_on_exec = 1;
if (evsel->immediate) {
attr->disabled = 0;
attr->enable_on_exec = 0;
}
clockid = opts->clockid;
if (opts->use_clockid) {
attr->use_clockid = 1;
attr->clockid = opts->clockid;
}
if (evsel->precise_max)
attr->precise_ip = 3;
if (opts->all_user) {
attr->exclude_kernel = 1;
attr->exclude_user = 0;
}
if (opts->all_kernel) {
attr->exclude_kernel = 0;
attr->exclude_user = 1;
}
if (evsel->core.own_cpus || evsel->unit)
evsel->core.attr.read_format |= PERF_FORMAT_ID;
/*
* Apply event specific term settings,
* it overloads any global configuration.
*/
evsel__apply_config_terms(evsel, opts, track);
evsel->ignore_missing_thread = opts->ignore_missing_thread;
/* The --period option takes the precedence. */
if (opts->period_set) {
if (opts->period)
evsel__set_sample_bit(evsel, PERIOD);
else
evsel__reset_sample_bit(evsel, PERIOD);
}
/*
* A dummy event never triggers any actual counter and therefore
* cannot be used with branch_stack.
*
* For initial_delay, a dummy event is added implicitly.
* The software event will error out with -EOPNOTSUPP
* if the BRANCH_STACK bit is set.
*/
if (evsel__is_dummy_event(evsel))
evsel__reset_sample_bit(evsel, BRANCH_STACK);
if (evsel__is_offcpu_event(evsel))
evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;
arch__post_evsel_config(evsel, attr);
}
int evsel__set_filter(struct evsel *evsel, const char *filter)
{
char *new_filter = strdup(filter);
if (new_filter != NULL) {
free(evsel->filter);
evsel->filter = new_filter;
return 0;
}
return -1;
}
static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter)
{
char *new_filter;
if (evsel->filter == NULL)
return evsel__set_filter(evsel, filter);
if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
free(evsel->filter);
evsel->filter = new_filter;
return 0;
}
return -1;
}
int evsel__append_tp_filter(struct evsel *evsel, const char *filter)
{
return evsel__append_filter(evsel, "(%s) && (%s)", filter);
}
int evsel__append_addr_filter(struct evsel *evsel, const char *filter)
{
return evsel__append_filter(evsel, "%s,%s", filter);
}
/* Caller has to clear disabled after going through all CPUs. */
int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx)
{
return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
}
int evsel__enable(struct evsel *evsel)
{
int err = perf_evsel__enable(&evsel->core);
if (!err)
evsel->disabled = false;
return err;
}
/* Caller has to set disabled after going through all CPUs. */
int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx)
{
return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx);
}
int evsel__disable(struct evsel *evsel)
{
int err = perf_evsel__disable(&evsel->core);
/*
* We mark it disabled here so that tools that disable an event can
* ignore events after they disable it. I.e. the ring buffer may have
* already a few more events queued up before the kernel got the stop
* request.
*/
if (!err)
evsel->disabled = true;
return err;
}
void free_config_terms(struct list_head *config_terms)
{
struct evsel_config_term *term, *h;
list_for_each_entry_safe(term, h, config_terms, list) {
list_del_init(&term->list);
if (term->free_str)
zfree(&term->val.str);
free(term);
}
}
static void evsel__free_config_terms(struct evsel *evsel)
{
free_config_terms(&evsel->config_terms);
}
void evsel__exit(struct evsel *evsel)
{
assert(list_empty(&evsel->core.node));
assert(evsel->evlist == NULL);
bpf_counter__destroy(evsel);
perf_bpf_filter__destroy(evsel);
evsel__free_counts(evsel);
perf_evsel__free_fd(&evsel->core);
perf_evsel__free_id(&evsel->core);
evsel__free_config_terms(evsel);
cgroup__put(evsel->cgrp);
perf_cpu_map__put(evsel->core.cpus);
perf_cpu_map__put(evsel->core.own_cpus);
perf_thread_map__put(evsel->core.threads);
zfree(&evsel->group_name);
zfree(&evsel->name);
zfree(&evsel->filter);
zfree(&evsel->pmu_name);
zfree(&evsel->group_pmu_name);
zfree(&evsel->unit);
zfree(&evsel->metric_id);
evsel__zero_per_pkg(evsel);
hashmap__free(evsel->per_pkg_mask);
evsel->per_pkg_mask = NULL;
zfree(&evsel->metric_events);
perf_evsel__object.fini(evsel);
}
void evsel__delete(struct evsel *evsel)
{
if (!evsel)
return;
evsel__exit(evsel);
free(evsel);
}
void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count)
{
struct perf_counts_values tmp;
if (!evsel->prev_raw_counts)
return;
tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
*perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
count->val = count->val - tmp.val;
count->ena = count->ena - tmp.ena;
count->run = count->run - tmp.run;
}
static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
{
struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);
return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
}
static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
u64 val, u64 ena, u64 run, u64 lost)
{
struct perf_counts_values *count;
count = perf_counts(counter->counts, cpu_map_idx, thread);
count->val = val;
count->ena = ena;
count->run = run;
count->lost = lost;
perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
}
static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
{
u64 read_format = leader->core.attr.read_format;
struct sample_read_value *v;
u64 nr, ena = 0, run = 0, lost = 0;
nr = *data++;
if (nr != (u64) leader->core.nr_members)
return -EINVAL;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
ena = *data++;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
run = *data++;
v = (void *)data;
sample_read_group__for_each(v, nr, read_format) {
struct evsel *counter;
counter = evlist__id2evsel(leader->evlist, v->id);
if (!counter)
return -EINVAL;
if (read_format & PERF_FORMAT_LOST)
lost = v->lost;
evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
}
return 0;
}
static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
{
struct perf_stat_evsel *ps = leader->stats;
u64 read_format = leader->core.attr.read_format;
int size = perf_evsel__read_size(&leader->core);
u64 *data = ps->group_data;
if (!(read_format & PERF_FORMAT_ID))
return -EINVAL;
if (!evsel__is_group_leader(leader))
return -EINVAL;
if (!data) {
data = zalloc(size);
if (!data)
return -ENOMEM;
ps->group_data = data;
}
if (FD(leader, cpu_map_idx, thread) < 0)
return -EINVAL;
if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0)
return -errno;
return evsel__process_group_data(leader, cpu_map_idx, thread, data);
}
int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
{
u64 read_format = evsel->core.attr.read_format;
if (read_format & PERF_FORMAT_GROUP)
return evsel__read_group(evsel, cpu_map_idx, thread);
return evsel__read_one(evsel, cpu_map_idx, thread);
}
int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
{
struct perf_counts_values count;
size_t nv = scale ? 3 : 1;
if (FD(evsel, cpu_map_idx, thread) < 0)
return -EINVAL;
if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0)
return -ENOMEM;
if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
return -errno;
evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
perf_counts_values__scale(&count, scale, NULL);
*perf_counts(evsel->counts, cpu_map_idx, thread) = count;
return 0;
}
static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
int cpu_map_idx)
{
struct perf_cpu cpu;
cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
return perf_cpu_map__idx(other->core.cpus, cpu);
}
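/*
* When a group mixes hybrid and non-hybrid events, the leader's CPU map
* can differ from the member's; translate the member's CPU map index into
* the leader's so the right group fd is picked below.
*/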
static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx)
{
struct evsel *leader = evsel__leader(evsel);
if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
(!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
return evsel__match_other_cpu(evsel, leader, cpu_map_idx);
}
return cpu_map_idx;
}
static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
{
struct evsel *leader = evsel__leader(evsel);
int fd;
if (evsel__is_group_leader(evsel))
return -1;
/*
* Leader must be already processed/open,
* if not it's a bug.
*/
BUG_ON(!leader->core.fd);
cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx);
if (cpu_map_idx == -1)
return -1;
fd = FD(leader, cpu_map_idx, thread);
BUG_ON(fd == -1 && !leader->skippable);
/*
* When the leader has been skipped, return -2 to distinguish from no
* group leader case.
*/
return fd == -1 ? -2 : fd;
}
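/*
* When a thread exits between enumeration and perf_event_open(), its fds
* have to be dropped: evsel__remove_fd() shifts the per-thread fds left by
* one, and update_fds() applies that shift to every evsel opened so far,
* including the already-opened CPUs of the current one, keeping the fd
* arrays in sync with the shrinking thread map.
*/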
static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
{
for (int cpu = 0; cpu < nr_cpus; cpu++)
for (int thread = thread_idx; thread < nr_threads - 1; thread++)
FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
}
static int update_fds(struct evsel *evsel,
int nr_cpus, int cpu_map_idx,
int nr_threads, int thread_idx)
{
struct evsel *pos;
if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)
return -EINVAL;
evlist__for_each_entry(evsel->evlist, pos) {
nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx;
evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
/*
* Since the fds for the next evsel have not been created yet,
* there is no need to iterate over the whole event list.
*/
if (pos == evsel)
break;
}
return 0;
}
static bool evsel__ignore_missing_thread(struct evsel *evsel,
int nr_cpus, int cpu_map_idx,
struct perf_thread_map *threads,
int thread, int err)
{
pid_t ignore_pid = perf_thread_map__pid(threads, thread);
if (!evsel->ignore_missing_thread)
return false;
/* The system wide setup does not work with threads. */
if (evsel->core.system_wide)
return false;
/* -ESRCH is the perf event syscall errno for a pid that is not found. */
if (err != -ESRCH)
return false;
/* If there's only one thread, let it fail. */
if (threads->nr == 1)
return false;
/*
* We should remove fd for missing_thread first
* because thread_map__remove() will decrease threads->nr.
*/
if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
return false;
if (thread_map__remove(threads, thread))
return false;
pr_warning("WARNING: Ignored open failure for pid %d\n",
ignore_pid);
return true;
}
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
void *priv __maybe_unused)
{
return fprintf(fp, " %-32s %s\n", name, val);
}
static void display_attr(struct perf_event_attr *attr)
{
if (verbose >= 2 || debug_peo_args) {
fprintf(stderr, "%.60s\n", graph_dotted_line);
fprintf(stderr, "perf_event_attr:\n");
perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
fprintf(stderr, "%.60s\n", graph_dotted_line);
}
}
bool evsel__precise_ip_fallback(struct evsel *evsel)
{
/* Do not try less precise if not requested. */
if (!evsel->precise_max)
return false;
/*
* We tried all the precise_ip values, and it's
* still failing, so leave it to standard fallback.
*/
if (!evsel->core.attr.precise_ip) {
evsel->core.attr.precise_ip = evsel->precise_ip_original;
return false;
}
if (!evsel->precise_ip_original)
evsel->precise_ip_original = evsel->core.attr.precise_ip;
evsel->core.attr.precise_ip--;
pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
display_attr(&evsel->core.attr);
return true;
}
static struct perf_cpu_map *empty_cpu_map;
static struct perf_thread_map *empty_thread_map;
static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
int nthreads = perf_thread_map__nr(threads);
if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
(perf_missing_features.aux_output && evsel->core.attr.aux_output))
return -EINVAL;
if (cpus == NULL) {
if (empty_cpu_map == NULL) {
empty_cpu_map = perf_cpu_map__dummy_new();
if (empty_cpu_map == NULL)
return -ENOMEM;
}
cpus = empty_cpu_map;
}
if (threads == NULL) {
if (empty_thread_map == NULL) {
empty_thread_map = thread_map__new_by_tid(-1);
if (empty_thread_map == NULL)
return -ENOMEM;
}
threads = empty_thread_map;
}
if (evsel->core.fd == NULL &&
perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
return -ENOMEM;
evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
if (evsel->cgrp)
evsel->open_flags |= PERF_FLAG_PID_CGROUP;
return 0;
}
static void evsel__disable_missing_features(struct evsel *evsel)
{
if (perf_missing_features.read_lost)
evsel->core.attr.read_format &= ~PERF_FORMAT_LOST;
if (perf_missing_features.weight_struct) {
evsel__set_sample_bit(evsel, WEIGHT);
evsel__reset_sample_bit(evsel, WEIGHT_STRUCT);
}
if (perf_missing_features.clockid_wrong)
evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
if (perf_missing_features.clockid) {
evsel->core.attr.use_clockid = 0;
evsel->core.attr.clockid = 0;
}
if (perf_missing_features.cloexec)
evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
if (perf_missing_features.mmap2)
evsel->core.attr.mmap2 = 0;
if (evsel->pmu && evsel->pmu->missing_features.exclude_guest)
evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
if (perf_missing_features.lbr_flags)
evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_NO_CYCLES);
if (perf_missing_features.group_read && evsel->core.attr.inherit)
evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
if (perf_missing_features.ksymbol)
evsel->core.attr.ksymbol = 0;
if (perf_missing_features.bpf)
evsel->core.attr.bpf_event = 0;
if (perf_missing_features.branch_hw_idx)
evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX;
if (perf_missing_features.sample_id_all)
evsel->core.attr.sample_id_all = 0;
}
int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
int err;
err = __evsel__prepare_open(evsel, cpus, threads);
if (err)
return err;
evsel__disable_missing_features(evsel);
return err;
}
bool evsel__detect_missing_features(struct evsel *evsel)
{
/*
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
if (!perf_missing_features.read_lost &&
(evsel->core.attr.read_format & PERF_FORMAT_LOST)) {
perf_missing_features.read_lost = true;
pr_debug2("switching off PERF_FORMAT_LOST support\n");
return true;
} else if (!perf_missing_features.weight_struct &&
(evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) {
perf_missing_features.weight_struct = true;
pr_debug2("switching off weight struct support\n");
return true;
} else if (!perf_missing_features.code_page_size &&
(evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) {
perf_missing_features.code_page_size = true;
pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n");
return false;
} else if (!perf_missing_features.data_page_size &&
(evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) {
perf_missing_features.data_page_size = true;
pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n");
return false;
} else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) {
perf_missing_features.cgroup = true;
pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n");
return false;
} else if (!perf_missing_features.branch_hw_idx &&
(evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) {
perf_missing_features.branch_hw_idx = true;
pr_debug2("switching off branch HW index support\n");
return true;
} else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
perf_missing_features.aux_output = true;
pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n");
return false;
} else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
perf_missing_features.bpf = true;
pr_debug2_peo("switching off bpf_event\n");
return true;
} else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
perf_missing_features.ksymbol = true;
pr_debug2_peo("switching off ksymbol\n");
return true;
} else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
perf_missing_features.write_backward = true;
pr_debug2_peo("switching off write_backward\n");
return false;
} else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
perf_missing_features.clockid_wrong = true;
pr_debug2_peo("switching off clockid\n");
return true;
} else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
perf_missing_features.clockid = true;
pr_debug2_peo("switching off use_clockid\n");
return true;
} else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) {
perf_missing_features.cloexec = true;
pr_debug2_peo("switching off cloexec flag\n");
return true;
} else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
perf_missing_features.mmap2 = true;
pr_debug2_peo("switching off mmap2\n");
return true;
} else if (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) {
if (evsel->pmu == NULL)
evsel->pmu = evsel__find_pmu(evsel);
if (evsel->pmu)
evsel->pmu->missing_features.exclude_guest = true;
else {
/* we cannot find PMU, disable attrs now */
evsel->core.attr.exclude_host = false;
evsel->core.attr.exclude_guest = false;
}
if (evsel->exclude_GH) {
pr_debug2_peo("PMU has no exclude_host/guest support, bailing out\n");
return false;
}
if (!perf_missing_features.exclude_guest) {
perf_missing_features.exclude_guest = true;
pr_debug2_peo("switching off exclude_guest, exclude_host\n");
}
return true;
} else if (!perf_missing_features.sample_id_all) {
perf_missing_features.sample_id_all = true;
pr_debug2_peo("switching off sample_id_all\n");
return true;
} else if (!perf_missing_features.lbr_flags &&
(evsel->core.attr.branch_sample_type &
(PERF_SAMPLE_BRANCH_NO_CYCLES |
PERF_SAMPLE_BRANCH_NO_FLAGS))) {
perf_missing_features.lbr_flags = true;
pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
return true;
} else if (!perf_missing_features.group_read &&
evsel->core.attr.inherit &&
(evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
evsel__is_group_leader(evsel)) {
perf_missing_features.group_read = true;
pr_debug2_peo("switching off group read\n");
return true;
} else {
return false;
}
}
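/*
* Try to raise RLIMIT_NOFILE when perf_event_open() fails with EMFILE:
* first bump the soft limit to the hard limit, then grow both by 1000.
* Returns true if retrying the open is worthwhile.
*/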
bool evsel__increase_rlimit(enum rlimit_action *set_rlimit)
{
int old_errno;
struct rlimit l;
if (*set_rlimit < INCREASED_MAX) {
old_errno = errno;
if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
if (*set_rlimit == NO_CHANGE) {
l.rlim_cur = l.rlim_max;
} else {
l.rlim_cur = l.rlim_max + 1000;
l.rlim_max = l.rlim_cur;
}
if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
(*set_rlimit) += 1;
errno = old_errno;
return true;
}
}
errno = old_errno;
}
return false;
}
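/*
* Open the event on the requested range of CPU map indexes and threads.
* On failure this retries after, in order: lowering precise_ip, dropping a
* missing thread, raising the fd limit on EMFILE, and finally disabling
* perf_event_attr features the running kernel does not support.
*/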
static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads,
int start_cpu_map_idx, int end_cpu_map_idx)
{
int idx, thread, nthreads;
int pid = -1, err, old_errno;
enum rlimit_action set_rlimit = NO_CHANGE;
err = __evsel__prepare_open(evsel, cpus, threads);
if (err)
return err;
if (cpus == NULL)
cpus = empty_cpu_map;
if (threads == NULL)
threads = empty_thread_map;
nthreads = perf_thread_map__nr(threads);
if (evsel->cgrp)
pid = evsel->cgrp->fd;
fallback_missing_features:
evsel__disable_missing_features(evsel);
pr_debug3("Opening: %s\n", evsel__name(evsel));
display_attr(&evsel->core.attr);
for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
for (thread = 0; thread < nthreads; thread++) {
int fd, group_fd;
retry_open:
if (thread >= nthreads)
break;
if (!evsel->cgrp && !evsel->core.system_wide)
pid = perf_thread_map__pid(threads, thread);
group_fd = get_group_fd(evsel, idx, thread);
if (group_fd == -2) {
pr_debug("broken group leader for %s\n", evsel->name);
err = -EINVAL;
goto out_close;
}
test_attr__ready();
/* Debug message used by test scripts */
pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);
fd = sys_perf_event_open(&evsel->core.attr, pid,
perf_cpu_map__cpu(cpus, idx).cpu,
group_fd, evsel->open_flags);
FD(evsel, idx, thread) = fd;
if (fd < 0) {
err = -errno;
pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
err);
goto try_fallback;
}
bpf_counter__install_pe(evsel, idx, fd);
if (unlikely(test_attr__enabled)) {
test_attr__open(&evsel->core.attr, pid,
perf_cpu_map__cpu(cpus, idx),
fd, group_fd, evsel->open_flags);
}
/* Debug message used by test scripts */
pr_debug2_peo(" = %d\n", fd);
if (evsel->bpf_fd >= 0) {
int evt_fd = fd;
int bpf_fd = evsel->bpf_fd;
err = ioctl(evt_fd,
PERF_EVENT_IOC_SET_BPF,
bpf_fd);
if (err && errno != EEXIST) {
pr_err("failed to attach bpf fd %d: %s\n",
bpf_fd, strerror(errno));
err = -EINVAL;
goto out_close;
}
}
set_rlimit = NO_CHANGE;
/*
* If we succeeded but had to kill clockid, fail and
* have evsel__open_strerror() print us a nice error.
*/
if (perf_missing_features.clockid ||
perf_missing_features.clockid_wrong) {
err = -EINVAL;
goto out_close;
}
}
}
return 0;
try_fallback:
if (evsel__precise_ip_fallback(evsel))
goto retry_open;
if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
idx, threads, thread, err)) {
/* We just removed 1 thread, so lower the upper nthreads limit. */
nthreads--;
/* ... and pretend like nothing has happened. */
err = 0;
goto retry_open;
}
/*
* perf stat needs between 5 and 22 fds per CPU. When we run out
* of them try to increase the limits.
*/
if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit))
goto retry_open;
if (err != -EINVAL || idx > 0 || thread > 0)
goto out_close;
if (evsel__detect_missing_features(evsel))
goto fallback_missing_features;
out_close:
if (err)
threads->err_thread = thread;
old_errno = errno;
do {
while (--thread >= 0) {
if (FD(evsel, idx, thread) >= 0)
close(FD(evsel, idx, thread));
FD(evsel, idx, thread) = -1;
}
thread = nthreads;
} while (--idx >= 0);
errno = old_errno;
return err;
}
int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
}
void evsel__close(struct evsel *evsel)
{
perf_evsel__close(&evsel->core);
perf_evsel__free_id(&evsel->core);
}
int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
{
if (cpu_map_idx == -1)
return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
}
int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
{
return evsel__open(evsel, NULL, threads);
}
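/*
* For non-sample events the sample_id_all fields are appended at the end
* of the record, so they are parsed here backwards from the last u64.
*/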
static int perf_evsel__parse_id_sample(const struct evsel *evsel,
const union perf_event *event,
struct perf_sample *sample)
{
u64 type = evsel->core.attr.sample_type;
const __u64 *array = event->sample.array;
bool swapped = evsel->needs_swap;
union u64_swap u;
array += ((event->header.size -
sizeof(event->header)) / sizeof(u64)) - 1;
if (type & PERF_SAMPLE_IDENTIFIER) {
sample->id = *array;
array--;
}
if (type & PERF_SAMPLE_CPU) {
u.val64 = *array;
if (swapped) {
/* undo swap of u64, then swap on individual u32s */
u.val64 = bswap_64(u.val64);
u.val32[0] = bswap_32(u.val32[0]);
}
sample->cpu = u.val32[0];
array--;
}
if (type & PERF_SAMPLE_STREAM_ID) {
sample->stream_id = *array;
array--;
}
if (type & PERF_SAMPLE_ID) {
sample->id = *array;
array--;
}
if (type & PERF_SAMPLE_TIME) {
sample->time = *array;
array--;
}
if (type & PERF_SAMPLE_TID) {
u.val64 = *array;
if (swapped) {
/* undo swap of u64, then swap on individual u32s */
u.val64 = bswap_64(u.val64);
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
}
sample->pid = u.val32[0];
sample->tid = u.val32[1];
array--;
}
return 0;
}
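/*
* Bounds-checking helpers for evsel__parse_sample(): make sure a field
* read never goes past the end of the received event.
*/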
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
u64 size)
{
return size > max_size || offset + size > endp;
}
#define OVERFLOW_CHECK(offset, size, max_size) \
do { \
if (overflow(endp, (max_size), (offset), (size))) \
return -EFAULT; \
} while (0)
#define OVERFLOW_CHECK_u64(offset) \
OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
static int
perf_event__check_size(union perf_event *event, unsigned int sample_size)
{
/*
* The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
* up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
* check the format does not go past the end of the event.
*/
if (sample_size + sizeof(event->header) > event->header.size)
return -EFAULT;
return 0;
}
void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
const __u64 *array,
u64 type __maybe_unused)
{
data->weight = *array;
}
u64 evsel__bitfield_swap_branch_flags(u64 value)
{
u64 new_val = 0;
/*
* branch_flags
* union {
* u64 values;
* struct {
* mispred:1 //target mispredicted
* predicted:1 //target predicted
* in_tx:1 //in transaction
* abort:1 //transaction abort
* cycles:16 //cycle count to last branch
* type:4 //branch type
* spec:2 //branch speculation info
* new_type:4 //additional branch type
* priv:3 //privilege level
* reserved:31
* }
* }
*
* Avoid calling bswap64() on the entire branch_flag.value,
* as it has variable bit-field sizes. Instead the
* macro takes each bit-field's position/size and
* swaps it based on the host endianness.
*/
if (host_is_bigendian()) {
new_val = bitfield_swap(value, 0, 1);
new_val |= bitfield_swap(value, 1, 1);
new_val |= bitfield_swap(value, 2, 1);
new_val |= bitfield_swap(value, 3, 1);
new_val |= bitfield_swap(value, 4, 16);
new_val |= bitfield_swap(value, 20, 4);
new_val |= bitfield_swap(value, 24, 2);
new_val |= bitfield_swap(value, 26, 4);
new_val |= bitfield_swap(value, 30, 3);
new_val |= bitfield_swap(value, 33, 31);
} else {
new_val = bitfield_swap(value, 63, 1);
new_val |= bitfield_swap(value, 62, 1);
new_val |= bitfield_swap(value, 61, 1);
new_val |= bitfield_swap(value, 60, 1);
new_val |= bitfield_swap(value, 44, 16);
new_val |= bitfield_swap(value, 40, 4);
new_val |= bitfield_swap(value, 38, 2);
new_val |= bitfield_swap(value, 34, 4);
new_val |= bitfield_swap(value, 31, 3);
new_val |= bitfield_swap(value, 0, 31);
}
return new_val;
}
int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
u64 type = evsel->core.attr.sample_type;
bool swapped = evsel->needs_swap;
const __u64 *array;
u16 max_size = event->header.size;
const void *endp = (void *)event + max_size;
u64 sz;
/*
* used for cross-endian analysis. See git commit 65014ab3
* for why this goofiness is needed.
*/
union u64_swap u;
memset(data, 0, sizeof(*data));
data->cpu = data->pid = data->tid = -1;
data->stream_id = data->id = data->time = -1ULL;
data->period = evsel->core.attr.sample_period;
data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
data->misc = event->header.misc;
data->id = -1ULL;
data->data_src = PERF_MEM_DATA_SRC_NONE;
data->vcpu = -1;
if (event->header.type != PERF_RECORD_SAMPLE) {
if (!evsel->core.attr.sample_id_all)
return 0;
return perf_evsel__parse_id_sample(evsel, event, data);
}
array = event->sample.array;
if (perf_event__check_size(event, evsel->sample_size))
return -EFAULT;
if (type & PERF_SAMPLE_IDENTIFIER) {
data->id = *array;
array++;
}
if (type & PERF_SAMPLE_IP) {
data->ip = *array;
array++;
}
if (type & PERF_SAMPLE_TID) {
u.val64 = *array;
if (swapped) {
/* undo swap of u64, then swap on individual u32s */
u.val64 = bswap_64(u.val64);
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
}
data->pid = u.val32[0];
data->tid = u.val32[1];
array++;
}
if (type & PERF_SAMPLE_TIME) {
data->time = *array;
array++;
}
if (type & PERF_SAMPLE_ADDR) {
data->addr = *array;
array++;
}
if (type & PERF_SAMPLE_ID) {
data->id = *array;
array++;
}
if (type & PERF_SAMPLE_STREAM_ID) {
data->stream_id = *array;
array++;
}
if (type & PERF_SAMPLE_CPU) {
u.val64 = *array;
if (swapped) {
/* undo swap of u64, then swap on individual u32s */
u.val64 = bswap_64(u.val64);
u.val32[0] = bswap_32(u.val32[0]);
}
data->cpu = u.val32[0];
array++;
}
if (type & PERF_SAMPLE_PERIOD) {
data->period = *array;
array++;
}
if (type & PERF_SAMPLE_READ) {
u64 read_format = evsel->core.attr.read_format;
OVERFLOW_CHECK_u64(array);
if (read_format & PERF_FORMAT_GROUP)
data->read.group.nr = *array;
else
data->read.one.value = *array;
array++;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
OVERFLOW_CHECK_u64(array);
data->read.time_enabled = *array;
array++;
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
OVERFLOW_CHECK_u64(array);
data->read.time_running = *array;
array++;
}
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
const u64 max_group_nr = UINT64_MAX /
sizeof(struct sample_read_value);
if (data->read.group.nr > max_group_nr)
return -EFAULT;
sz = data->read.group.nr * sample_read_value_size(read_format);
OVERFLOW_CHECK(array, sz, max_size);
data->read.group.values =
(struct sample_read_value *)array;
array = (void *)array + sz;
} else {
OVERFLOW_CHECK_u64(array);
data->read.one.id = *array;
array++;
if (read_format & PERF_FORMAT_LOST) {
OVERFLOW_CHECK_u64(array);
data->read.one.lost = *array;
array++;
}
}
}
if (type & PERF_SAMPLE_CALLCHAIN) {
const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
OVERFLOW_CHECK_u64(array);
data->callchain = (struct ip_callchain *)array++;
if (data->callchain->nr > max_callchain_nr)
return -EFAULT;
sz = data->callchain->nr * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
array = (void *)array + sz;
}
if (type & PERF_SAMPLE_RAW) {
OVERFLOW_CHECK_u64(array);
u.val64 = *array;
/*
* Undo swap of u64, then swap on individual u32s,
* get the size of the raw area and undo all of the
* swap. The pevent interface handles endianness by
* itself.
*/
if (swapped) {
u.val64 = bswap_64(u.val64);
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
}
data->raw_size = u.val32[0];
/*
* The raw data is aligned on 64bits including the
* u32 size, so it's safe to use mem_bswap_64.
*/
if (swapped)
mem_bswap_64((void *) array, data->raw_size);
array = (void *)array + sizeof(u32);
OVERFLOW_CHECK(array, data->raw_size, max_size);
data->raw_data = (void *)array;
array = (void *)array + data->raw_size;
}
if (type & PERF_SAMPLE_BRANCH_STACK) {
const u64 max_branch_nr = UINT64_MAX /
sizeof(struct branch_entry);
struct branch_entry *e;
unsigned int i;
OVERFLOW_CHECK_u64(array);
data->branch_stack = (struct branch_stack *)array++;
if (data->branch_stack->nr > max_branch_nr)
return -EFAULT;
sz = data->branch_stack->nr * sizeof(struct branch_entry);
if (evsel__has_branch_hw_idx(evsel)) {
sz += sizeof(u64);
e = &data->branch_stack->entries[0];
} else {
data->no_hw_idx = true;
/*
* if the PERF_SAMPLE_BRANCH_HW_INDEX is not applied,
* only nr and entries[] will be output by kernel.
*/
e = (struct branch_entry *)&data->branch_stack->hw_idx;
}
if (swapped) {
/*
* struct branch_flag does not have an endian-
* specific bit field definition, and bswap
* will not resolve the issue, since these
* are bit fields.
*
* evsel__bitfield_swap_branch_flags() uses the
* bitfield_swap macro to swap the bit positions
* based on the host endianness.
*/
for (i = 0; i < data->branch_stack->nr; i++, e++)
e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value);
}
OVERFLOW_CHECK(array, sz, max_size);
array = (void *)array + sz;
}
if (type & PERF_SAMPLE_REGS_USER) {
OVERFLOW_CHECK_u64(array);
data->user_regs.abi = *array;
array++;
if (data->user_regs.abi) {
u64 mask = evsel->core.attr.sample_regs_user;
sz = hweight64(mask) * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
data->user_regs.mask = mask;
data->user_regs.regs = (u64 *)array;
array = (void *)array + sz;
}
}
if (type & PERF_SAMPLE_STACK_USER) {
OVERFLOW_CHECK_u64(array);
sz = *array++;
data->user_stack.offset = ((char *)(array - 1)
- (char *) event);
if (!sz) {
data->user_stack.size = 0;
} else {
OVERFLOW_CHECK(array, sz, max_size);
data->user_stack.data = (char *)array;
array = (void *)array + sz;
OVERFLOW_CHECK_u64(array);
data->user_stack.size = *array++;
if (WARN_ONCE(data->user_stack.size > sz,
"user stack dump failure\n"))
return -EFAULT;
}
}
if (type & PERF_SAMPLE_WEIGHT_TYPE) {
OVERFLOW_CHECK_u64(array);
arch_perf_parse_sample_weight(data, array, type);
array++;
}
if (type & PERF_SAMPLE_DATA_SRC) {
OVERFLOW_CHECK_u64(array);
data->data_src = *array;
array++;
}
if (type & PERF_SAMPLE_TRANSACTION) {
OVERFLOW_CHECK_u64(array);
data->transaction = *array;
array++;
}
data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
if (type & PERF_SAMPLE_REGS_INTR) {
OVERFLOW_CHECK_u64(array);
data->intr_regs.abi = *array;
array++;
if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
u64 mask = evsel->core.attr.sample_regs_intr;
sz = hweight64(mask) * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
data->intr_regs.mask = mask;
data->intr_regs.regs = (u64 *)array;
array = (void *)array + sz;
}
}
data->phys_addr = 0;
if (type & PERF_SAMPLE_PHYS_ADDR) {
data->phys_addr = *array;
array++;
}
data->cgroup = 0;
if (type & PERF_SAMPLE_CGROUP) {
data->cgroup = *array;
array++;
}
data->data_page_size = 0;
if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
data->data_page_size = *array;
array++;
}
data->code_page_size = 0;
if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
data->code_page_size = *array;
array++;
}
if (type & PERF_SAMPLE_AUX) {
OVERFLOW_CHECK_u64(array);
sz = *array++;
OVERFLOW_CHECK(array, sz, max_size);
/* Undo swap of data */
if (swapped)
mem_bswap_64((char *)array, sz);
data->aux_sample.size = sz;
data->aux_sample.data = (char *)array;
array = (void *)array + sz;
}
return 0;
}
int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
u64 *timestamp)
{
u64 type = evsel->core.attr.sample_type;
const __u64 *array;
if (!(type & PERF_SAMPLE_TIME))
return -1;
if (event->header.type != PERF_RECORD_SAMPLE) {
struct perf_sample data = {
.time = -1ULL,
};
if (!evsel->core.attr.sample_id_all)
return -1;
if (perf_evsel__parse_id_sample(evsel, event, &data))
return -1;
*timestamp = data.time;
return 0;
}
array = event->sample.array;
if (perf_event__check_size(event, evsel->sample_size))
return -EFAULT;
if (type & PERF_SAMPLE_IDENTIFIER)
array++;
if (type & PERF_SAMPLE_IP)
array++;
if (type & PERF_SAMPLE_TID)
array++;
if (type & PERF_SAMPLE_TIME)
*timestamp = *array;
return 0;
}
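/*
* Size of the sample_id_all trailer appended to non-sample events:
* one u64 per selected sample_type bit below.
*/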
u16 evsel__id_hdr_size(struct evsel *evsel)
{
u64 sample_type = evsel->core.attr.sample_type;
u16 size = 0;
if (sample_type & PERF_SAMPLE_TID)
size += sizeof(u64);
if (sample_type & PERF_SAMPLE_TIME)
size += sizeof(u64);
if (sample_type & PERF_SAMPLE_ID)
size += sizeof(u64);
if (sample_type & PERF_SAMPLE_STREAM_ID)
size += sizeof(u64);
if (sample_type & PERF_SAMPLE_CPU)
size += sizeof(u64);
if (sample_type & PERF_SAMPLE_IDENTIFIER)
size += sizeof(u64);
return size;
}
#ifdef HAVE_LIBTRACEEVENT
struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
{
return tep_find_field(evsel->tp_format, name);
}
void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
struct tep_format_field *field = evsel__field(evsel, name);
int offset;
if (!field)
return NULL;
offset = field->offset;
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
offset = *(int *)(sample->raw_data + field->offset);
offset &= 0xffff;
if (tep_field_is_relative(field->flags))
offset += field->offset + field->size;
}
return sample->raw_data + offset;
}
u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
bool needs_swap)
{
u64 value;
void *ptr = sample->raw_data + field->offset;
switch (field->size) {
case 1:
return *(u8 *)ptr;
case 2:
value = *(u16 *)ptr;
break;
case 4:
value = *(u32 *)ptr;
break;
case 8:
memcpy(&value, ptr, sizeof(u64));
break;
default:
return 0;
}
if (!needs_swap)
return value;
switch (field->size) {
case 2:
return bswap_16(value);
case 4:
return bswap_32(value);
case 8:
return bswap_64(value);
default:
return 0;
}
return 0;
}
u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
struct tep_format_field *field = evsel__field(evsel, name);
return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}
#endif
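/*
* Try a cheaper event setup when the original open failed: fall back from
* the hardware cycles event to the cpu-clock software event, or retry with
* the :u modifier when perf_event_paranoid forbids kernel profiling.
*/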
bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
{
int paranoid;
if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
evsel->core.attr.type == PERF_TYPE_HARDWARE &&
evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
/*
* If it's cycles then fall back to hrtimer based
* cpu-clock-tick sw counter, which is always available even if
* no PMU support.
*
* PPC returns ENXIO until 2.6.37 (behavior changed with commit
* b0a873e).
*/
scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
evsel->core.attr.type = PERF_TYPE_SOFTWARE;
evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
zfree(&evsel->name);
return true;
} else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
(paranoid = perf_event_paranoid()) > 1) {
const char *name = evsel__name(evsel);
char *new_name;
const char *sep = ":";
/* If event has exclude user then don't exclude kernel. */
if (evsel->core.attr.exclude_user)
return false;
/* Is the separator already present in the name? */
if (strchr(name, '/') ||
(strchr(name, ':') && !evsel->is_libpfm_event))
sep = "";
if (asprintf(&new_name, "%s%su", name, sep) < 0)
return false;
free(evsel->name);
evsel->name = new_name;
scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
"to fall back to excluding kernel and hypervisor "
" samples", paranoid);
evsel->core.attr.exclude_kernel = 1;
evsel->core.attr.exclude_hv = 1;
return true;
}
return false;
}
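/*
* Scan /proc and report whether a process whose comm matches @name is
* currently running (used to detect a conflicting oprofile daemon).
*/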
static bool find_process(const char *name)
{
size_t len = strlen(name);
DIR *dir;
struct dirent *d;
int ret = -1;
dir = opendir(procfs__mountpoint());
if (!dir)
return false;
/* Walk through the directory. */
while (ret && (d = readdir(dir)) != NULL) {
char path[PATH_MAX];
char *data;
size_t size;
if ((d->d_type != DT_DIR) ||
!strcmp(".", d->d_name) ||
!strcmp("..", d->d_name))
continue;
scnprintf(path, sizeof(path), "%s/%s/comm",
procfs__mountpoint(), d->d_name);
if (filename__read_str(path, &data, &size))
continue;
ret = strncmp(name, data, len);
free(data);
}
closedir(dir);
return ret ? false : true;
}
int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
char *msg __maybe_unused,
size_t size __maybe_unused)
{
return 0;
}
int evsel__open_strerror(struct evsel *evsel, struct target *target,
int err, char *msg, size_t size)
{
char sbuf[STRERR_BUFSIZE];
int printed = 0, enforced = 0;
int ret;
switch (err) {
case EPERM:
case EACCES:
printed += scnprintf(msg + printed, size - printed,
"Access to performance monitoring and observability operations is limited.\n");
if (!sysfs__read_int("fs/selinux/enforce", &enforced)) {
if (enforced) {
printed += scnprintf(msg + printed, size - printed,
"Enforced MAC policy settings (SELinux) can limit access to performance\n"
"monitoring and observability operations. Inspect system audit records for\n"
"more perf_event access control information and adjusting the policy.\n");
}
}
if (err == EPERM)
printed += scnprintf(msg, size,
"No permission to enable %s event.\n\n", evsel__name(evsel));
return scnprintf(msg + printed, size - printed,
"Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
"access to performance monitoring and observability operations for processes\n"
"without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n"
"More information can be found at 'Perf events and tool security' document:\n"
"https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n"
"perf_event_paranoid setting is %d:\n"
" -1: Allow use of (almost) all events by all users\n"
" Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
">= 0: Disallow raw and ftrace function tracepoint access\n"
">= 1: Disallow CPU event access\n"
">= 2: Disallow kernel profiling\n"
"To make the adjusted perf_event_paranoid setting permanent preserve it\n"
"in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)",
perf_event_paranoid());
case ENOENT:
return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel));
case EMFILE:
return scnprintf(msg, size, "%s",
"Too many events are opened.\n"
"Probably the maximum number of open file descriptors has been reached.\n"
"Hint: Try again after reducing the number of events.\n"
"Hint: Try increasing the limit with 'ulimit -n <limit>'");
case ENOMEM:
if (evsel__has_callchain(evsel) &&
access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
return scnprintf(msg, size,
"Not enough memory to setup event with callchain.\n"
"Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
"Hint: Current value: %d", sysctl__max_stack());
break;
case ENODEV:
if (target->cpu_list)
return scnprintf(msg, size, "%s",
"No such device - did you specify an out-of-range profile CPU?");
break;
case EOPNOTSUPP:
if (evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK)
return scnprintf(msg, size,
"%s: PMU Hardware or event type doesn't support branch stack sampling.",
evsel__name(evsel));
if (evsel->core.attr.aux_output)
return scnprintf(msg, size,
"%s: PMU Hardware doesn't support 'aux_output' feature",
evsel__name(evsel));
if (evsel->core.attr.sample_period != 0)
return scnprintf(msg, size,
"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
evsel__name(evsel));
if (evsel->core.attr.precise_ip)
return scnprintf(msg, size, "%s",
"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
return scnprintf(msg, size, "%s",
"No hardware sampling interrupt available.\n");
#endif
break;
case EBUSY:
if (find_process("oprofiled"))
return scnprintf(msg, size,
"The PMU counters are busy/taken by another profiler.\n"
"We found oprofile daemon running, please stop it and try again.");
break;
case EINVAL:
if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel.");
if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size)
return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel.");
if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
if (perf_missing_features.clockid)
return scnprintf(msg, size, "clockid feature not supported.");
if (perf_missing_features.clockid_wrong)
return scnprintf(msg, size, "wrong clockid (%d).", clockid);
if (perf_missing_features.aux_output)
return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
if (!target__has_cpu(target))
return scnprintf(msg, size,
"Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
evsel__name(evsel));
break;
case ENODATA:
return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. "
"Please add an auxiliary event in front of the load latency event.");
default:
break;
}
ret = arch_evsel__open_strerror(evsel, msg, size);
if (ret)
return ret;
return scnprintf(msg, size,
"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
"/bin/dmesg | grep -i perf may provide additional information.\n",
err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
}
struct perf_env *evsel__env(struct evsel *evsel)
{
if (evsel && evsel->evlist && evsel->evlist->env)
return evsel->evlist->env;
return &perf_env;
}
static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
{
int cpu_map_idx, thread;
for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
thread++) {
int fd = FD(evsel, cpu_map_idx, thread);
if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
cpu_map_idx, thread, fd) < 0)
return -1;
}
}
return 0;
}
int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
{
struct perf_cpu_map *cpus = evsel->core.cpus;
struct perf_thread_map *threads = evsel->core.threads;
if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
return -ENOMEM;
return store_evsel_ids(evsel, evlist);
}
void evsel__zero_per_pkg(struct evsel *evsel)
{
struct hashmap_entry *cur;
size_t bkt;
if (evsel->per_pkg_mask) {
hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
zfree(&cur->pkey);
hashmap__clear(evsel->per_pkg_mask);
}
}
/**
* evsel__is_hybrid - does the evsel have a known PMU that is hybrid. Note, this
* will be false on hybrid systems for hardware and legacy
* cache events.
*/
bool evsel__is_hybrid(const struct evsel *evsel)
{
if (perf_pmus__num_core_pmus() == 1)
return false;
return evsel->core.is_pmu_core;
}
struct evsel *evsel__leader(const struct evsel *evsel)
{
return container_of(evsel->core.leader, struct evsel, core);
}
bool evsel__has_leader(struct evsel *evsel, struct evsel *leader)
{
return evsel->core.leader == &leader->core;
}
bool evsel__is_leader(struct evsel *evsel)
{
return evsel__has_leader(evsel, evsel);
}
void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
{
evsel->core.leader = &leader->core;
}
int evsel__source_count(const struct evsel *evsel)
{
struct evsel *pos;
int count = 0;
evlist__for_each_entry(evsel->evlist, pos) {
if (pos->metric_leader == evsel)
count++;
}
return count;
}
bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused)
{
return false;
}
/*
* Remove an event from a given group (leader).
* Some events, e.g., perf metrics Topdown events,
* must always be grouped; such events are left in their group.
*/
void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader)
{
if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) {
evsel__set_leader(evsel, evsel);
evsel->core.nr_members = 0;
leader->core.nr_members--;
}
}
| linux-master | tools/perf/util/evsel.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <string.h>
#include "api/fs/fs.h"
#include "cputopo.h"
#include "smt.h"
bool smt_on(void)
{
static bool cached;
static bool cached_result;
int fs_value;
if (cached)
return cached_result;
if (sysfs__read_int("devices/system/cpu/smt/active", &fs_value) >= 0)
cached_result = (fs_value == 1);
else
cached_result = cpu_topology__smt_on(online_topology());
cached = true;
return cached_result;
}
bool core_wide(bool system_wide, const char *user_requested_cpu_list)
{
/* If not everything running on a core is being recorded then we can't use core_wide. */
if (!system_wide)
return false;
/* Cheap case: SMT is disabled and therefore we're inherently core_wide. */
if (!smt_on())
return true;
return cpu_topology__core_wide(online_topology(), user_requested_cpu_list);
}
| linux-master | tools/perf/util/smt.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <subcmd/pager.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "color.h"
#include <math.h>
#include <unistd.h>
int perf_use_color_default = -1;
static int __color_vsnprintf(char *bf, size_t size, const char *color,
const char *fmt, va_list args, const char *trail)
{
int r = 0;
/*
* Auto-detect:
*/
if (perf_use_color_default < 0) {
if (isatty(1) || pager_in_use())
perf_use_color_default = 1;
else
perf_use_color_default = 0;
}
if (perf_use_color_default && *color)
r += scnprintf(bf, size, "%s", color);
r += vscnprintf(bf + r, size - r, fmt, args);
if (perf_use_color_default && *color)
r += scnprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
if (trail)
r += scnprintf(bf + r, size - r, "%s", trail);
return r;
}
/* Colors are not included in return value */
static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
va_list args)
{
int r = 0;
/*
* Auto-detect:
*/
if (perf_use_color_default < 0) {
if (isatty(fileno(fp)) || pager_in_use())
perf_use_color_default = 1;
else
perf_use_color_default = 0;
}
if (perf_use_color_default && *color)
fprintf(fp, "%s", color);
r += vfprintf(fp, fmt, args);
if (perf_use_color_default && *color)
fprintf(fp, "%s", PERF_COLOR_RESET);
return r;
}
int color_vsnprintf(char *bf, size_t size, const char *color,
const char *fmt, va_list args)
{
return __color_vsnprintf(bf, size, color, fmt, args, NULL);
}
int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
{
return __color_vfprintf(fp, color, fmt, args);
}
int color_snprintf(char *bf, size_t size, const char *color,
const char *fmt, ...)
{
va_list args;
int r;
va_start(args, fmt);
r = color_vsnprintf(bf, size, color, fmt, args);
va_end(args);
return r;
}
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
{
va_list args;
int r;
va_start(args, fmt);
r = color_vfprintf(fp, color, fmt, args);
va_end(args);
return r;
}
/*
* This function splits the buffer by newlines and colors the lines individually.
*
* Returns 0 on success.
*/
int color_fwrite_lines(FILE *fp, const char *color,
size_t count, const char *buf)
{
if (!*color)
return fwrite(buf, count, 1, fp) != 1;
while (count) {
char *p = memchr(buf, '\n', count);
if (p != buf && (fputs(color, fp) < 0 ||
fwrite(buf, p ? (size_t)(p - buf) : count, 1, fp) != 1 ||
fputs(PERF_COLOR_RESET, fp) < 0))
return -1;
if (!p)
return 0;
if (fputc('\n', fp) < 0)
return -1;
count -= p + 1 - buf;
buf = p + 1;
}
return 0;
}
const char *get_percent_color(double percent)
{
const char *color = PERF_COLOR_NORMAL;
/*
* We color high-overhead entries in red, mid-overhead
* entries in green - and keep the low overhead places
* normal:
*/
if (fabs(percent) >= MIN_RED)
color = PERF_COLOR_RED;
else {
if (fabs(percent) > MIN_GREEN)
color = PERF_COLOR_GREEN;
}
return color;
}
int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
{
int r;
const char *color;
color = get_percent_color(percent);
r = color_fprintf(fp, color, fmt, percent);
return r;
}
int value_color_snprintf(char *bf, size_t size, const char *fmt, double value)
{
const char *color = get_percent_color(value);
return color_snprintf(bf, size, color, fmt, value);
}
int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...)
{
va_list args;
double percent;
va_start(args, fmt);
percent = va_arg(args, double);
va_end(args);
return value_color_snprintf(bf, size, fmt, percent);
}
int percent_color_len_snprintf(char *bf, size_t size, const char *fmt, ...)
{
va_list args;
int len;
double percent;
const char *color;
va_start(args, fmt);
len = va_arg(args, int);
percent = va_arg(args, double);
va_end(args);
color = get_percent_color(percent);
return color_snprintf(bf, size, color, fmt, len, percent);
}
| linux-master | tools/perf/util/color.c |
// SPDX-License-Identifier: GPL-2.0
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <errno.h>
#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include <byteswap.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/stringify.h>
#include "build-id.h"
#include "event.h"
#include "debug.h"
#include "evlist.h"
#include "namespaces.h"
#include "symbol.h"
#include <elf.h>
#include "tsc.h"
#include "session.h"
#include "jit.h"
#include "jitdump.h"
#include "genelf.h"
#include "thread.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
struct jit_buf_desc {
struct perf_data *output;
struct perf_session *session;
struct machine *machine;
struct nsinfo *nsi;
union jr_entry *entry;
void *buf;
uint64_t sample_type;
size_t bufsize;
FILE *in;
bool needs_bswap; /* handles cross-endianness */
bool use_arch_timestamp;
void *debug_data;
void *unwinding_data;
uint64_t unwinding_size;
uint64_t unwinding_mapped_size;
uint64_t eh_frame_hdr_size;
size_t nr_debug_entries;
uint32_t code_load_count;
u64 bytes_written;
struct rb_root code_root;
char dir[PATH_MAX];
};
struct jit_tool {
struct perf_tool tool;
struct perf_data output;
struct perf_data input;
u64 bytes_written;
};
#define hmax(a, b) ((a) > (b) ? (a) : (b))
#define get_jit_tool(t) (container_of(tool, struct jit_tool, tool))
static int
jit_emit_elf(struct jit_buf_desc *jd,
char *filename,
const char *sym,
uint64_t code_addr,
const void *code,
int csize,
void *debug,
int nr_debug_entries,
void *unwinding,
uint32_t unwinding_header_size,
uint32_t unwinding_size)
{
int ret, fd, saved_errno;
struct nscookie nsc;
if (verbose > 0)
fprintf(stderr, "write ELF image %s\n", filename);
nsinfo__mountns_enter(jd->nsi, &nsc);
fd = open(filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
saved_errno = errno;
nsinfo__mountns_exit(&nsc);
if (fd == -1) {
pr_warning("cannot create jit ELF %s: %s\n", filename, strerror(saved_errno));
return -1;
}
ret = jit_write_elf(fd, code_addr, sym, (const void *)code, csize, debug, nr_debug_entries,
unwinding, unwinding_header_size, unwinding_size);
close(fd);
if (ret) {
nsinfo__mountns_enter(jd->nsi, &nsc);
unlink(filename);
nsinfo__mountns_exit(&nsc);
}
return ret;
}
static void
jit_close(struct jit_buf_desc *jd)
{
if (!(jd && jd->in))
return;
funlockfile(jd->in);
fclose(jd->in);
jd->in = NULL;
}
static int
jit_validate_events(struct perf_session *session)
{
struct evsel *evsel;
/*
* check that all events use CLOCK_MONOTONIC
*/
evlist__for_each_entry(session->evlist, evsel) {
if (evsel->core.attr.use_clockid == 0 || evsel->core.attr.clockid != CLOCK_MONOTONIC)
return -1;
}
return 0;
}
static int
jit_open(struct jit_buf_desc *jd, const char *name)
{
struct jitheader header;
struct nscookie nsc;
struct jr_prefix *prefix;
ssize_t bs, bsz = 0;
void *n, *buf = NULL;
int ret, retval = -1;
nsinfo__mountns_enter(jd->nsi, &nsc);
jd->in = fopen(name, "r");
nsinfo__mountns_exit(&nsc);
if (!jd->in)
return -1;
bsz = hmax(sizeof(header), sizeof(*prefix));
buf = malloc(bsz);
if (!buf)
goto error;
/*
* protect from writer modifying the file while we are reading it
*/
flockfile(jd->in);
ret = fread(buf, sizeof(header), 1, jd->in);
if (ret != 1)
goto error;
memcpy(&header, buf, sizeof(header));
if (header.magic != JITHEADER_MAGIC) {
if (header.magic != JITHEADER_MAGIC_SW)
goto error;
jd->needs_bswap = true;
}
if (jd->needs_bswap) {
header.version = bswap_32(header.version);
header.total_size = bswap_32(header.total_size);
header.pid = bswap_32(header.pid);
header.elf_mach = bswap_32(header.elf_mach);
header.timestamp = bswap_64(header.timestamp);
header.flags = bswap_64(header.flags);
}
jd->use_arch_timestamp = header.flags & JITDUMP_FLAGS_ARCH_TIMESTAMP;
if (verbose > 2)
pr_debug("version=%u\nhdr.size=%u\nts=0x%llx\npid=%d\nelf_mach=%d\nuse_arch_timestamp=%d\n",
header.version,
header.total_size,
(unsigned long long)header.timestamp,
header.pid,
header.elf_mach,
jd->use_arch_timestamp);
if (header.version > JITHEADER_VERSION) {
pr_err("wrong jitdump version %u, expected " __stringify(JITHEADER_VERSION),
header.version);
goto error;
}
if (header.flags & JITDUMP_FLAGS_RESERVED) {
pr_err("jitdump file contains invalid or unsupported flags 0x%llx\n",
(unsigned long long)header.flags & JITDUMP_FLAGS_RESERVED);
goto error;
}
if (jd->use_arch_timestamp && !jd->session->time_conv.time_mult) {
pr_err("jitdump file uses arch timestamps but there is no timestamp conversion\n");
goto error;
}
/*
* validate event is using the correct clockid
*/
if (!jd->use_arch_timestamp && jit_validate_events(jd->session)) {
pr_err("error, jitted code must be sampled with perf record -k 1\n");
goto error;
}
bs = header.total_size - sizeof(header);
if (bs > bsz) {
n = realloc(buf, bs);
if (!n)
goto error;
bsz = bs;
buf = n;
/* read extra we do not know about */
ret = fread(buf, bs - bsz, 1, jd->in);
if (ret != 1)
goto error;
}
/*
* keep dirname for generating files and mmap records
*/
strcpy(jd->dir, name);
dirname(jd->dir);
free(buf);
return 0;
error:
free(buf);
funlockfile(jd->in);
fclose(jd->in);
return retval;
}
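/*
* Read the next record from the jitdump file into jd->buf, growing the
* buffer as needed and byte-swapping the fields of known record types
* when the file was produced with the opposite endianness.
*/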
static union jr_entry *
jit_get_next_entry(struct jit_buf_desc *jd)
{
struct jr_prefix *prefix;
union jr_entry *jr;
void *addr;
size_t bs, size;
int id, ret;
if (!(jd && jd->in))
return NULL;
if (jd->buf == NULL) {
size_t sz = getpagesize();
if (sz < sizeof(*prefix))
sz = sizeof(*prefix);
jd->buf = malloc(sz);
if (jd->buf == NULL)
return NULL;
jd->bufsize = sz;
}
prefix = jd->buf;
/*
* file is still locked at this point
*/
ret = fread(prefix, sizeof(*prefix), 1, jd->in);
if (ret != 1)
return NULL;
if (jd->needs_bswap) {
prefix->id = bswap_32(prefix->id);
prefix->total_size = bswap_32(prefix->total_size);
prefix->timestamp = bswap_64(prefix->timestamp);
}
id = prefix->id;
size = prefix->total_size;
bs = (size_t)size;
if (bs < sizeof(*prefix))
return NULL;
if (id >= JIT_CODE_MAX) {
pr_warning("next_entry: unknown record type %d, skipping\n", id);
}
if (bs > jd->bufsize) {
void *n;
n = realloc(jd->buf, bs);
if (!n)
return NULL;
jd->buf = n;
jd->bufsize = bs;
}
addr = ((void *)jd->buf) + sizeof(*prefix);
ret = fread(addr, bs - sizeof(*prefix), 1, jd->in);
if (ret != 1)
return NULL;
jr = (union jr_entry *)jd->buf;
switch(id) {
case JIT_CODE_DEBUG_INFO:
if (jd->needs_bswap) {
uint64_t n;
jr->info.code_addr = bswap_64(jr->info.code_addr);
jr->info.nr_entry = bswap_64(jr->info.nr_entry);
for (n = 0 ; n < jr->info.nr_entry; n++) {
jr->info.entries[n].addr = bswap_64(jr->info.entries[n].addr);
jr->info.entries[n].lineno = bswap_32(jr->info.entries[n].lineno);
jr->info.entries[n].discrim = bswap_32(jr->info.entries[n].discrim);
}
}
break;
case JIT_CODE_UNWINDING_INFO:
if (jd->needs_bswap) {
jr->unwinding.unwinding_size = bswap_64(jr->unwinding.unwinding_size);
jr->unwinding.eh_frame_hdr_size = bswap_64(jr->unwinding.eh_frame_hdr_size);
jr->unwinding.mapped_size = bswap_64(jr->unwinding.mapped_size);
}
break;
case JIT_CODE_CLOSE:
break;
case JIT_CODE_LOAD:
if (jd->needs_bswap) {
jr->load.pid = bswap_32(jr->load.pid);
jr->load.tid = bswap_32(jr->load.tid);
jr->load.vma = bswap_64(jr->load.vma);
jr->load.code_addr = bswap_64(jr->load.code_addr);
jr->load.code_size = bswap_64(jr->load.code_size);
jr->load.code_index= bswap_64(jr->load.code_index);
}
jd->code_load_count++;
break;
case JIT_CODE_MOVE:
if (jd->needs_bswap) {
jr->move.pid = bswap_32(jr->move.pid);
jr->move.tid = bswap_32(jr->move.tid);
jr->move.vma = bswap_64(jr->move.vma);
jr->move.old_code_addr = bswap_64(jr->move.old_code_addr);
jr->move.new_code_addr = bswap_64(jr->move.new_code_addr);
jr->move.code_size = bswap_64(jr->move.code_size);
jr->move.code_index = bswap_64(jr->move.code_index);
}
break;
case JIT_CODE_MAX:
default:
/* skip unknown records (their payload has already been read above) */
break;
}
return jr;
}
static int
jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
{
ssize_t size;
size = perf_data__write(jd->output, event, event->header.size);
if (size < 0)
return -1;
jd->bytes_written += size;
return 0;
}
static pid_t jr_entry_pid(struct jit_buf_desc *jd, union jr_entry *jr)
{
if (jd->nsi && nsinfo__in_pidns(jd->nsi))
return nsinfo__tgid(jd->nsi);
return jr->load.pid;
}
static pid_t jr_entry_tid(struct jit_buf_desc *jd, union jr_entry *jr)
{
if (jd->nsi && nsinfo__in_pidns(jd->nsi))
return nsinfo__pid(jd->nsi);
return jr->load.tid;
}
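/*
* jitdump files may carry raw architecture (TSC) timestamps; convert them
* to perf time using the TIME_CONV parameters recorded in the session.
*/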
static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
{
struct perf_tsc_conversion tc = { .time_shift = 0, };
struct perf_record_time_conv *time_conv = &jd->session->time_conv;
if (!jd->use_arch_timestamp)
return timestamp;
tc.time_shift = time_conv->time_shift;
tc.time_mult = time_conv->time_mult;
tc.time_zero = time_conv->time_zero;
/*
* The TIME_CONV event was extended with the fields starting at
* "time_cycles" when cap_user_time_short is supported. For backward
* compatibility, check the event size and only assign these extended
* fields if they are contained in the event.
*/
if (event_contains(*time_conv, time_cycles)) {
tc.time_cycles = time_conv->time_cycles;
tc.time_mask = time_conv->time_mask;
tc.cap_user_time_zero = time_conv->cap_user_time_zero;
tc.cap_user_time_short = time_conv->cap_user_time_short;
if (!tc.cap_user_time_zero)
return 0;
}
return tsc_to_perf_time(timestamp, &tc);
}
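/*
* For a JIT_CODE_LOAD record: write a standalone ELF image for the jitted
* function under jd->dir, then synthesize and inject a matching MMAP2
* event (plus a pseudo sample) so the new dso gets resolved and its
* build-id is emitted in the header.
*/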
static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
{
struct perf_sample sample;
union perf_event *event;
struct perf_tool *tool = jd->session->tool;
uint64_t code, addr;
uintptr_t uaddr;
char *filename;
struct stat st;
size_t size;
u16 idr_size;
const char *sym;
uint64_t count;
int ret, csize, usize;
pid_t nspid, pid, tid;
struct {
u32 pid, tid;
u64 time;
} *id;
nspid = jr->load.pid;
pid = jr_entry_pid(jd, jr);
tid = jr_entry_tid(jd, jr);
csize = jr->load.code_size;
usize = jd->unwinding_mapped_size;
addr = jr->load.code_addr;
sym = (void *)((unsigned long)jr + sizeof(jr->load));
code = (unsigned long)jr + jr->load.p.total_size - csize;
count = jr->load.code_index;
idr_size = jd->machine->id_hdr_size;
event = calloc(1, sizeof(*event) + idr_size);
if (!event)
return -1;
filename = event->mmap2.filename;
size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
jd->dir,
nspid,
count);
size++; /* for \0 */
size = PERF_ALIGN(size, sizeof(u64));
uaddr = (uintptr_t)code;
ret = jit_emit_elf(jd, filename, sym, addr, (const void *)uaddr, csize, jd->debug_data, jd->nr_debug_entries,
jd->unwinding_data, jd->eh_frame_hdr_size, jd->unwinding_size);
if (jd->debug_data && jd->nr_debug_entries) {
zfree(&jd->debug_data);
jd->nr_debug_entries = 0;
}
if (jd->unwinding_data && jd->eh_frame_hdr_size) {
zfree(&jd->unwinding_data);
jd->eh_frame_hdr_size = 0;
jd->unwinding_mapped_size = 0;
jd->unwinding_size = 0;
}
if (ret) {
free(event);
return -1;
}
if (nsinfo__stat(filename, &st, jd->nsi))
memset(&st, 0, sizeof(st));
event->mmap2.header.type = PERF_RECORD_MMAP2;
event->mmap2.header.misc = PERF_RECORD_MISC_USER;
event->mmap2.header.size = (sizeof(event->mmap2) -
(sizeof(event->mmap2.filename) - size) + idr_size);
event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
event->mmap2.start = addr;
event->mmap2.len = usize ? ALIGN_8(csize) + usize : csize;
event->mmap2.pid = pid;
event->mmap2.tid = tid;
event->mmap2.ino = st.st_ino;
event->mmap2.maj = major(st.st_dev);
event->mmap2.min = minor(st.st_dev);
event->mmap2.prot = st.st_mode;
event->mmap2.flags = MAP_SHARED;
event->mmap2.ino_generation = 1;
id = (void *)((unsigned long)event + event->mmap.header.size - idr_size);
if (jd->sample_type & PERF_SAMPLE_TID) {
id->pid = pid;
id->tid = tid;
}
if (jd->sample_type & PERF_SAMPLE_TIME)
id->time = convert_timestamp(jd, jr->load.p.timestamp);
/*
* create pseudo sample to induce dso hit increment
* use first address as sample address
*/
memset(&sample, 0, sizeof(sample));
sample.cpumode = PERF_RECORD_MISC_USER;
sample.pid = pid;
sample.tid = tid;
sample.time = id->time;
sample.ip = addr;
ret = perf_event__process_mmap2(tool, event, &sample, jd->machine);
if (ret)
goto out;
ret = jit_inject_event(jd, event);
/*
* mark dso as use to generate buildid in the header
*/
if (!ret)
build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);
out:
free(event);
return ret;
}
static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
{
struct perf_sample sample;
union perf_event *event;
struct perf_tool *tool = jd->session->tool;
char *filename;
size_t size;
struct stat st;
int usize;
u16 idr_size;
int ret;
pid_t nspid, pid, tid;
struct {
u32 pid, tid;
u64 time;
} *id;
nspid = jr->load.pid;
pid = jr_entry_pid(jd, jr);
tid = jr_entry_tid(jd, jr);
usize = jd->unwinding_mapped_size;
idr_size = jd->machine->id_hdr_size;
/*
* +16 to account for sample_id_all (hack)
*/
event = calloc(1, sizeof(*event) + 16);
if (!event)
return -1;
filename = event->mmap2.filename;
size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
jd->dir,
nspid,
jr->move.code_index);
size++; /* for \0 */
if (nsinfo__stat(filename, &st, jd->nsi))
memset(&st, 0, sizeof(st));
size = PERF_ALIGN(size, sizeof(u64));
event->mmap2.header.type = PERF_RECORD_MMAP2;
event->mmap2.header.misc = PERF_RECORD_MISC_USER;
event->mmap2.header.size = (sizeof(event->mmap2) -
(sizeof(event->mmap2.filename) - size) + idr_size);
event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
event->mmap2.start = jr->move.new_code_addr;
event->mmap2.len = usize ? ALIGN_8(jr->move.code_size) + usize
: jr->move.code_size;
event->mmap2.pid = pid;
event->mmap2.tid = tid;
event->mmap2.ino = st.st_ino;
event->mmap2.maj = major(st.st_dev);
event->mmap2.min = minor(st.st_dev);
event->mmap2.prot = st.st_mode;
event->mmap2.flags = MAP_SHARED;
event->mmap2.ino_generation = 1;
id = (void *)((unsigned long)event + event->mmap.header.size - idr_size);
if (jd->sample_type & PERF_SAMPLE_TID) {
id->pid = pid;
id->tid = tid;
}
if (jd->sample_type & PERF_SAMPLE_TIME)
id->time = convert_timestamp(jd, jr->load.p.timestamp);
/*
* create pseudo sample to induce dso hit increment
* use first address as sample address
*/
memset(&sample, 0, sizeof(sample));
sample.cpumode = PERF_RECORD_MISC_USER;
sample.pid = pid;
sample.tid = tid;
sample.time = id->time;
sample.ip = jr->move.new_code_addr;
ret = perf_event__process_mmap2(tool, event, &sample, jd->machine);
if (ret)
return ret;
ret = jit_inject_event(jd, event);
if (!ret)
build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);
return ret;
}
static int jit_repipe_debug_info(struct jit_buf_desc *jd, union jr_entry *jr)
{
void *data;
size_t sz;
if (!(jd && jr))
return -1;
sz = jr->prefix.total_size - sizeof(jr->info);
data = malloc(sz);
if (!data)
return -1;
memcpy(data, &jr->info.entries, sz);
jd->debug_data = data;
/*
* we must use nr_entry instead of size here because
* we cannot distinguish actual entries from padding otherwise
*/
jd->nr_debug_entries = jr->info.nr_entry;
return 0;
}
static int
jit_repipe_unwinding_info(struct jit_buf_desc *jd, union jr_entry *jr)
{
void *unwinding_data;
uint32_t unwinding_data_size;
if (!(jd && jr))
return -1;
unwinding_data_size = jr->prefix.total_size - sizeof(jr->unwinding);
unwinding_data = malloc(unwinding_data_size);
if (!unwinding_data)
return -1;
memcpy(unwinding_data, &jr->unwinding.unwinding_data,
unwinding_data_size);
jd->eh_frame_hdr_size = jr->unwinding.eh_frame_hdr_size;
jd->unwinding_size = jr->unwinding.unwinding_size;
jd->unwinding_mapped_size = jr->unwinding.mapped_size;
jd->unwinding_data = unwinding_data;
return 0;
}
static int
jit_process_dump(struct jit_buf_desc *jd)
{
union jr_entry *jr;
int ret = 0;
while ((jr = jit_get_next_entry(jd))) {
switch(jr->prefix.id) {
case JIT_CODE_LOAD:
ret = jit_repipe_code_load(jd, jr);
break;
case JIT_CODE_MOVE:
ret = jit_repipe_code_move(jd, jr);
break;
case JIT_CODE_DEBUG_INFO:
ret = jit_repipe_debug_info(jd, jr);
break;
case JIT_CODE_UNWINDING_INFO:
ret = jit_repipe_unwinding_info(jd, jr);
break;
default:
ret = 0;
continue;
}
}
return ret;
}
static int
jit_inject(struct jit_buf_desc *jd, char *path)
{
int ret;
if (verbose > 0)
fprintf(stderr, "injecting: %s\n", path);
ret = jit_open(jd, path);
if (ret)
return -1;
ret = jit_process_dump(jd);
jit_close(jd);
if (verbose > 0)
fprintf(stderr, "injected: %s (%d)\n", path, ret);
return 0;
}
/*
* The file name must match the pattern .../jit-XXXX.dump
* where XXXX is the PID of the process which did the mmap()
* as captured in the RECORD_MMAP record
*/
static int
jit_detect(char *mmap_name, pid_t pid, struct nsinfo *nsi)
{
char *p;
char *end = NULL;
pid_t pid2;
if (verbose > 2)
fprintf(stderr, "jit marker trying : %s\n", mmap_name);
/*
* get file name
*/
p = strrchr(mmap_name, '/');
if (!p)
return -1;
/*
* match prefix
*/
if (strncmp(p, "/jit-", 5))
return -1;
/*
* skip prefix
*/
p += 5;
/*
* must be followed by a pid
*/
if (!isdigit(*p))
return -1;
pid2 = (int)strtol(p, &end, 10);
if (!end)
return -1;
/*
* pid does not match mmap pid
* pid==0 in system-wide mode (synthesized)
*/
if (pid && pid2 != nsinfo__nstgid(nsi))
return -1;
/*
* validate suffix
*/
if (strcmp(end, ".dump"))
return -1;
if (verbose > 0)
fprintf(stderr, "jit marker found: %s\n", mmap_name);
return 0;
}
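/*
 * A hypothetical example: an mmap record for "/tmp/jit/jit-1234.dump" coming
 * from a process whose namespace tgid is 1234 passes all of the checks above,
 * while "jit-1234.so", a non-numeric suffix or a pid mismatch is rejected.
 */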
static void jit_add_pid(struct machine *machine, pid_t pid)
{
struct thread *thread = machine__findnew_thread(machine, pid, pid);
if (!thread) {
pr_err("%s: thread %d not found or created\n", __func__, pid);
return;
}
thread__set_priv(thread, (void *)true);
thread__put(thread);
}
static bool jit_has_pid(struct machine *machine, pid_t pid)
{
struct thread *thread = machine__find_thread(machine, pid, pid);
void *priv;
if (!thread)
return false;
priv = thread__priv(thread);
thread__put(thread);
return (bool)priv;
}
int
jit_process(struct perf_session *session,
struct perf_data *output,
struct machine *machine,
char *filename,
pid_t pid,
pid_t tid,
u64 *nbytes)
{
struct thread *thread;
struct nsinfo *nsi;
struct evsel *first;
struct jit_buf_desc jd;
int ret;
thread = machine__findnew_thread(machine, pid, tid);
if (thread == NULL) {
pr_err("problem processing JIT mmap event, skipping it.\n");
return 0;
}
nsi = nsinfo__get(thread__nsinfo(thread));
thread__put(thread);
/*
* first, detect marker mmap (i.e., the jitdump mmap)
*/
if (jit_detect(filename, pid, nsi)) {
nsinfo__put(nsi);
/*
* Strip //anon*, [anon:* and /memfd:* mmaps if we processed a jitdump for this pid
*/
if (jit_has_pid(machine, pid) &&
((strncmp(filename, "//anon", 6) == 0) ||
(strncmp(filename, "[anon:", 6) == 0) ||
(strncmp(filename, "/memfd:", 7) == 0)))
return 1;
return 0;
}
memset(&jd, 0, sizeof(jd));
jd.session = session;
jd.output = output;
jd.machine = machine;
jd.nsi = nsi;
/*
* track sample_type to compute id_all layout
* perf sets the same sample type to all events as of now
*/
first = evlist__first(session->evlist);
jd.sample_type = first->core.attr.sample_type;
*nbytes = 0;
ret = jit_inject(&jd, filename);
if (!ret) {
jit_add_pid(machine, pid);
*nbytes = jd.bytes_written;
ret = 1;
}
nsinfo__put(jd.nsi);
free(jd.buf);
return ret;
}
| linux-master | tools/perf/util/jitdump.c |
// SPDX-License-Identifier: GPL-2.0
#include "block-range.h"
#include "annotate.h"
#include <assert.h>
#include <stdlib.h>
struct {
struct rb_root root;
u64 blocks;
} block_ranges;
static void block_range__debug(void)
{
#ifndef NDEBUG
struct rb_node *rb;
u64 old = 0; /* NULL isn't executable */
for (rb = rb_first(&block_ranges.root); rb; rb = rb_next(rb)) {
struct block_range *entry = rb_entry(rb, struct block_range, node);
assert(old < entry->start);
assert(entry->start <= entry->end); /* single instruction block; jump to a jump */
old = entry->end;
}
#endif
}
struct block_range *block_range__find(u64 addr)
{
struct rb_node **p = &block_ranges.root.rb_node;
struct rb_node *parent = NULL;
struct block_range *entry;
while (*p != NULL) {
parent = *p;
entry = rb_entry(parent, struct block_range, node);
if (addr < entry->start)
p = &parent->rb_left;
else if (addr > entry->end)
p = &parent->rb_right;
else
return entry;
}
return NULL;
}
static inline void rb_link_left_of_node(struct rb_node *left, struct rb_node *node)
{
struct rb_node **p = &node->rb_left;
while (*p) {
node = *p;
p = &node->rb_right;
}
rb_link_node(left, node, p);
}
static inline void rb_link_right_of_node(struct rb_node *right, struct rb_node *node)
{
struct rb_node **p = &node->rb_right;
while (*p) {
node = *p;
p = &node->rb_left;
}
rb_link_node(right, node, p);
}
/**
* block_range__create
* @start: branch target starting this basic block
* @end: branch ending this basic block
*
* Create all the required block ranges to precisely span the given range.
*/
struct block_range_iter block_range__create(u64 start, u64 end)
{
struct rb_node **p = &block_ranges.root.rb_node;
struct rb_node *n, *parent = NULL;
struct block_range *next, *entry = NULL;
struct block_range_iter iter = { NULL, NULL };
while (*p != NULL) {
parent = *p;
entry = rb_entry(parent, struct block_range, node);
if (start < entry->start)
p = &parent->rb_left;
else if (start > entry->end)
p = &parent->rb_right;
else
break;
}
/*
* Didn't find anything.. there's a hole at @start, however @end might
* be inside/behind the next range.
*/
if (!*p) {
if (!entry) /* tree empty */
goto do_whole;
/*
* If the last node is before, advance one to find the next.
*/
n = parent;
if (entry->end < start) {
n = rb_next(n);
if (!n)
goto do_whole;
}
next = rb_entry(n, struct block_range, node);
if (next->start <= end) { /* add head: [start...][n->start...] */
struct block_range *head = malloc(sizeof(struct block_range));
if (!head)
return iter;
*head = (struct block_range){
.start = start,
.end = next->start - 1,
.is_target = 1,
.is_branch = 0,
};
rb_link_left_of_node(&head->node, &next->node);
rb_insert_color(&head->node, &block_ranges.root);
block_range__debug();
iter.start = head;
goto do_tail;
}
do_whole:
/*
* The whole [start..end] range is non-overlapping.
*/
entry = malloc(sizeof(struct block_range));
if (!entry)
return iter;
*entry = (struct block_range){
.start = start,
.end = end,
.is_target = 1,
.is_branch = 1,
};
rb_link_node(&entry->node, parent, p);
rb_insert_color(&entry->node, &block_ranges.root);
block_range__debug();
iter.start = entry;
iter.end = entry;
goto done;
}
/*
* We found a range that overlapped with ours, split if needed.
*/
if (entry->start < start) { /* split: [e->start...][start...] */
struct block_range *head = malloc(sizeof(struct block_range));
if (!head)
return iter;
*head = (struct block_range){
.start = entry->start,
.end = start - 1,
.is_target = entry->is_target,
.is_branch = 0,
.coverage = entry->coverage,
.entry = entry->entry,
};
entry->start = start;
entry->is_target = 1;
entry->entry = 0;
rb_link_left_of_node(&head->node, &entry->node);
rb_insert_color(&head->node, &block_ranges.root);
block_range__debug();
} else if (entry->start == start)
entry->is_target = 1;
iter.start = entry;
do_tail:
/*
* At this point we've got: @iter.start = [@start...] but @end can still be
* inside or beyond it.
*/
entry = iter.start;
for (;;) {
/*
* If @end is inside @entry, split.
*/
if (end < entry->end) { /* split: [...end][...e->end] */
struct block_range *tail = malloc(sizeof(struct block_range));
if (!tail)
return iter;
*tail = (struct block_range){
.start = end + 1,
.end = entry->end,
.is_target = 0,
.is_branch = entry->is_branch,
.coverage = entry->coverage,
.taken = entry->taken,
.pred = entry->pred,
};
entry->end = end;
entry->is_branch = 1;
entry->taken = 0;
entry->pred = 0;
rb_link_right_of_node(&tail->node, &entry->node);
rb_insert_color(&tail->node, &block_ranges.root);
block_range__debug();
iter.end = entry;
goto done;
}
/*
* If @end matches @entry, done
*/
if (end == entry->end) {
entry->is_branch = 1;
iter.end = entry;
goto done;
}
next = block_range__next(entry);
if (!next)
goto add_tail;
/*
* If @end is beyond @entry but not inside @next, add tail.
*/
if (end < next->start) { /* add tail: [...e->end][...end] */
struct block_range *tail;
add_tail:
tail = malloc(sizeof(struct block_range));
if (!tail)
return iter;
*tail = (struct block_range){
.start = entry->end + 1,
.end = end,
.is_target = 0,
.is_branch = 1,
};
rb_link_right_of_node(&tail->node, &entry->node);
rb_insert_color(&tail->node, &block_ranges.root);
block_range__debug();
iter.end = tail;
goto done;
}
/*
* If there is a hole between @entry and @next, fill it.
*/
if (entry->end + 1 != next->start) {
struct block_range *hole = malloc(sizeof(struct block_range));
if (!hole)
return iter;
*hole = (struct block_range){
.start = entry->end + 1,
.end = next->start - 1,
.is_target = 0,
.is_branch = 0,
};
rb_link_left_of_node(&hole->node, &next->node);
rb_insert_color(&hole->node, &block_ranges.root);
block_range__debug();
}
entry = next;
}
done:
assert(iter.start->start == start && iter.start->is_target);
assert(iter.end->end == end && iter.end->is_branch);
block_ranges.blocks++;
return iter;
}
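/*
 * A rough usage sketch, assuming the iterator helpers declared in
 * block-range.h (block_range_iter__valid() and block_range_iter__next()):
 * callers such as the annotate code create the range for a sampled branch and
 * then walk every sub-range it spans, e.g.:
 *
 *   struct block_range_iter iter = block_range__create(start, end);
 *
 *   if (block_range_iter__valid(&iter)) {
 *           struct block_range *entry = iter.start;
 *           do {
 *                   entry->coverage++;
 *           } while ((entry = block_range_iter__next(&iter)));
 *   }
 */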
/*
* Compute coverage as:
*
* br->coverage / br->sym->max_coverage
*
* This ensures each symbol has a 100% spot, to reflect that each symbol has a
* most covered section.
*
* Returns [0-1] for coverage and -1 if we had no data whatsoever or the
* symbol does not exist.
*/
double block_range__coverage(struct block_range *br)
{
struct symbol *sym;
if (!br) {
if (block_ranges.blocks)
return 0;
return -1;
}
sym = br->sym;
if (!sym)
return -1;
return (double)br->coverage / symbol__annotation(sym)->max_coverage;
}
| linux-master | tools/perf/util/block-range.c |
// SPDX-License-Identifier: GPL-2.0
#include <subcmd/parse-options.h>
#include "evsel.h"
#include "cgroup.h"
#include "evlist.h"
#include "rblist.h"
#include "metricgroup.h"
#include "stat.h"
#include <linux/zalloc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <api/fs/fs.h>
#include <ftw.h>
#include <regex.h>
int nr_cgroups;
bool cgrp_event_expanded;
/* used to match cgroup name with patterns */
struct cgroup_name {
struct list_head list;
bool used;
char name[];
};
static LIST_HEAD(cgroup_list);
static int open_cgroup(const char *name)
{
char path[PATH_MAX + 1];
char mnt[PATH_MAX + 1];
int fd;
if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1, "perf_event"))
return -1;
scnprintf(path, PATH_MAX, "%s/%s", mnt, name);
fd = open(path, O_RDONLY);
if (fd == -1)
fprintf(stderr, "no access to cgroup %s\n", path);
return fd;
}
#ifdef HAVE_FILE_HANDLE
int read_cgroup_id(struct cgroup *cgrp)
{
char path[PATH_MAX + 1];
char mnt[PATH_MAX + 1];
struct {
struct file_handle fh;
uint64_t cgroup_id;
} handle;
int mount_id;
if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1, "perf_event"))
return -1;
scnprintf(path, PATH_MAX, "%s/%s", mnt, cgrp->name);
handle.fh.handle_bytes = sizeof(handle.cgroup_id);
if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0)
return -1;
cgrp->id = handle.cgroup_id;
return 0;
}
#endif /* HAVE_FILE_HANDLE */
#ifndef CGROUP2_SUPER_MAGIC
#define CGROUP2_SUPER_MAGIC 0x63677270
#endif
int cgroup_is_v2(const char *subsys)
{
char mnt[PATH_MAX + 1];
struct statfs stbuf;
if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1, subsys))
return -1;
if (statfs(mnt, &stbuf) < 0)
return -1;
return (stbuf.f_type == CGROUP2_SUPER_MAGIC);
}
static struct cgroup *evlist__find_cgroup(struct evlist *evlist, const char *str)
{
struct evsel *counter;
/*
* check if cgrp is already defined, if so we reuse it
*/
evlist__for_each_entry(evlist, counter) {
if (!counter->cgrp)
continue;
if (!strcmp(counter->cgrp->name, str))
return cgroup__get(counter->cgrp);
}
return NULL;
}
static struct cgroup *cgroup__new(const char *name, bool do_open)
{
struct cgroup *cgroup = zalloc(sizeof(*cgroup));
if (cgroup != NULL) {
refcount_set(&cgroup->refcnt, 1);
cgroup->name = strdup(name);
if (!cgroup->name)
goto out_err;
if (do_open) {
cgroup->fd = open_cgroup(name);
if (cgroup->fd == -1)
goto out_free_name;
} else {
cgroup->fd = -1;
}
}
return cgroup;
out_free_name:
zfree(&cgroup->name);
out_err:
free(cgroup);
return NULL;
}
struct cgroup *evlist__findnew_cgroup(struct evlist *evlist, const char *name)
{
struct cgroup *cgroup = evlist__find_cgroup(evlist, name);
return cgroup ?: cgroup__new(name, true);
}
static int add_cgroup(struct evlist *evlist, const char *str)
{
struct evsel *counter;
struct cgroup *cgrp = evlist__findnew_cgroup(evlist, str);
int n;
if (!cgrp)
return -1;
/*
* find corresponding event
* if add cgroup N, then need to find event N
*/
n = 0;
evlist__for_each_entry(evlist, counter) {
if (n == nr_cgroups)
goto found;
n++;
}
cgroup__put(cgrp);
return -1;
found:
counter->cgrp = cgrp;
return 0;
}
static void cgroup__delete(struct cgroup *cgroup)
{
if (cgroup->fd >= 0)
close(cgroup->fd);
zfree(&cgroup->name);
free(cgroup);
}
void cgroup__put(struct cgroup *cgrp)
{
if (cgrp && refcount_dec_and_test(&cgrp->refcnt)) {
cgroup__delete(cgrp);
}
}
struct cgroup *cgroup__get(struct cgroup *cgroup)
{
if (cgroup)
refcount_inc(&cgroup->refcnt);
return cgroup;
}
static void evsel__set_default_cgroup(struct evsel *evsel, struct cgroup *cgroup)
{
if (evsel->cgrp == NULL)
evsel->cgrp = cgroup__get(cgroup);
}
void evlist__set_default_cgroup(struct evlist *evlist, struct cgroup *cgroup)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel)
evsel__set_default_cgroup(evsel, cgroup);
}
/* helper function for ftw() in match_cgroups and list_cgroups */
static int add_cgroup_name(const char *fpath, const struct stat *sb __maybe_unused,
int typeflag, struct FTW *ftwbuf __maybe_unused)
{
struct cgroup_name *cn;
if (typeflag != FTW_D)
return 0;
cn = malloc(sizeof(*cn) + strlen(fpath) + 1);
if (cn == NULL)
return -1;
cn->used = false;
strcpy(cn->name, fpath);
list_add_tail(&cn->list, &cgroup_list);
return 0;
}
static int check_and_add_cgroup_name(const char *fpath)
{
struct cgroup_name *cn;
list_for_each_entry(cn, &cgroup_list, list) {
if (!strcmp(cn->name, fpath))
return 0;
}
/* pretend it was added by ftw() */
return add_cgroup_name(fpath, NULL, FTW_D, NULL);
}
static void release_cgroup_list(void)
{
struct cgroup_name *cn;
while (!list_empty(&cgroup_list)) {
cn = list_first_entry(&cgroup_list, struct cgroup_name, list);
list_del(&cn->list);
free(cn);
}
}
/* collect given cgroups only */
static int list_cgroups(const char *str)
{
const char *p, *e, *eos = str + strlen(str);
struct cgroup_name *cn;
char *s;
/* use given name as is when no regex is given */
for (;;) {
p = strchr(str, ',');
e = p ? p : eos;
if (e - str) {
int ret;
s = strndup(str, e - str);
if (!s)
return -1;
ret = check_and_add_cgroup_name(s);
free(s);
if (ret < 0)
return -1;
} else {
if (check_and_add_cgroup_name("/") < 0)
return -1;
}
if (!p)
break;
str = p+1;
}
/* these groups will be used */
list_for_each_entry(cn, &cgroup_list, list)
cn->used = true;
return 0;
}
/* collect all cgroups first and then match with the pattern */
static int match_cgroups(const char *str)
{
char mnt[PATH_MAX];
const char *p, *e, *eos = str + strlen(str);
struct cgroup_name *cn;
regex_t reg;
int prefix_len;
char *s;
if (cgroupfs_find_mountpoint(mnt, sizeof(mnt), "perf_event"))
return -1;
/* cgroup_name will have a full path, skip the root directory */
prefix_len = strlen(mnt);
/* collect all cgroups in the cgroup_list */
if (nftw(mnt, add_cgroup_name, 20, 0) < 0)
return -1;
for (;;) {
p = strchr(str, ',');
e = p ? p : eos;
/* allow empty cgroups, i.e., skip */
if (e - str) {
/* termination added */
s = strndup(str, e - str);
if (!s)
return -1;
if (regcomp(®, s, REG_NOSUB)) {
free(s);
return -1;
}
/* check cgroup name with the pattern */
list_for_each_entry(cn, &cgroup_list, list) {
char *name = cn->name + prefix_len;
if (name[0] == '/' && name[1])
name++;
if (!regexec(®, name, 0, NULL, 0))
cn->used = true;
}
regfree(®);
free(s);
} else {
/* the first entry is the root cgroup */
cn = list_first_entry(&cgroup_list, struct cgroup_name,
list);
cn->used = true;
}
if (!p)
break;
str = p+1;
}
return prefix_len;
}
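/*
 * A hypothetical example: with the perf_event hierarchy mounted at
 * /sys/fs/cgroup/perf_event and child groups "bg" and "fg/web", the pattern
 * string "fg.*" walks the whole tree with nftw(), strips the mountpoint
 * prefix and marks only "fg" and "fg/web" as used, while "bg" stays unused.
 */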
int parse_cgroups(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct evlist *evlist = *(struct evlist **)opt->value;
struct evsel *counter;
struct cgroup *cgrp = NULL;
const char *p, *e, *eos = str + strlen(str);
char *s;
int ret, i;
if (list_empty(&evlist->core.entries)) {
fprintf(stderr, "must define events before cgroups\n");
return -1;
}
for (;;) {
p = strchr(str, ',');
e = p ? p : eos;
/* allow empty cgroups, i.e., skip */
if (e - str) {
/* termination added */
s = strndup(str, e - str);
if (!s)
return -1;
ret = add_cgroup(evlist, s);
free(s);
if (ret)
return -1;
}
/* nr_cgroups is increased even for empty cgroups */
nr_cgroups++;
if (!p)
break;
str = p+1;
}
/* for the case one cgroup combine to multiple events */
i = 0;
if (nr_cgroups == 1) {
evlist__for_each_entry(evlist, counter) {
if (i == 0)
cgrp = counter->cgrp;
else {
counter->cgrp = cgrp;
refcount_inc(&cgrp->refcnt);
}
i++;
}
}
return 0;
}
static bool has_pattern_string(const char *str)
{
return !!strpbrk(str, "{}[]()|*+?^$");
}
int evlist__expand_cgroup(struct evlist *evlist, const char *str,
struct rblist *metric_events, bool open_cgroup)
{
struct evlist *orig_list, *tmp_list;
struct evsel *pos, *evsel, *leader;
struct rblist orig_metric_events;
struct cgroup *cgrp = NULL;
struct cgroup_name *cn;
int ret = -1;
int prefix_len;
if (evlist->core.nr_entries == 0) {
fprintf(stderr, "must define events before cgroups\n");
return -EINVAL;
}
orig_list = evlist__new();
tmp_list = evlist__new();
if (orig_list == NULL || tmp_list == NULL) {
fprintf(stderr, "memory allocation failed\n");
return -ENOMEM;
}
/* save original events and init evlist */
evlist__splice_list_tail(orig_list, &evlist->core.entries);
evlist->core.nr_entries = 0;
if (metric_events) {
orig_metric_events = *metric_events;
rblist__init(metric_events);
} else {
rblist__init(&orig_metric_events);
}
if (has_pattern_string(str))
prefix_len = match_cgroups(str);
else
prefix_len = list_cgroups(str);
if (prefix_len < 0)
goto out_err;
list_for_each_entry(cn, &cgroup_list, list) {
char *name;
if (!cn->used)
continue;
/* cgroup_name might have a full path, skip the prefix */
name = cn->name + prefix_len;
if (name[0] == '/' && name[1])
name++;
cgrp = cgroup__new(name, open_cgroup);
if (cgrp == NULL)
goto out_err;
leader = NULL;
evlist__for_each_entry(orig_list, pos) {
evsel = evsel__clone(pos);
if (evsel == NULL)
goto out_err;
cgroup__put(evsel->cgrp);
evsel->cgrp = cgroup__get(cgrp);
if (evsel__is_group_leader(pos))
leader = evsel;
evsel__set_leader(evsel, leader);
evlist__add(tmp_list, evsel);
}
/* cgroup__new() has a refcount, release it here */
cgroup__put(cgrp);
nr_cgroups++;
if (metric_events) {
if (metricgroup__copy_metric_events(tmp_list, cgrp,
metric_events,
&orig_metric_events) < 0)
goto out_err;
}
evlist__splice_list_tail(evlist, &tmp_list->core.entries);
tmp_list->core.nr_entries = 0;
}
if (list_empty(&evlist->core.entries)) {
fprintf(stderr, "no cgroup matched: %s\n", str);
goto out_err;
}
ret = 0;
cgrp_event_expanded = true;
out_err:
evlist__delete(orig_list);
evlist__delete(tmp_list);
rblist__exit(&orig_metric_events);
release_cgroup_list();
return ret;
}
static struct cgroup *__cgroup__findnew(struct rb_root *root, uint64_t id,
bool create, const char *path)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct cgroup *cgrp;
while (*p != NULL) {
parent = *p;
cgrp = rb_entry(parent, struct cgroup, node);
if (cgrp->id == id)
return cgrp;
if (cgrp->id < id)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
if (!create)
return NULL;
cgrp = malloc(sizeof(*cgrp));
if (cgrp == NULL)
return NULL;
cgrp->name = strdup(path);
if (cgrp->name == NULL) {
free(cgrp);
return NULL;
}
cgrp->fd = -1;
cgrp->id = id;
refcount_set(&cgrp->refcnt, 1);
rb_link_node(&cgrp->node, parent, p);
rb_insert_color(&cgrp->node, root);
return cgrp;
}
struct cgroup *cgroup__findnew(struct perf_env *env, uint64_t id,
const char *path)
{
struct cgroup *cgrp;
down_write(&env->cgroups.lock);
cgrp = __cgroup__findnew(&env->cgroups.tree, id, true, path);
up_write(&env->cgroups.lock);
return cgrp;
}
struct cgroup *cgroup__find(struct perf_env *env, uint64_t id)
{
struct cgroup *cgrp;
down_read(&env->cgroups.lock);
cgrp = __cgroup__findnew(&env->cgroups.tree, id, false, NULL);
up_read(&env->cgroups.lock);
return cgrp;
}
void perf_env__purge_cgroups(struct perf_env *env)
{
struct rb_node *node;
struct cgroup *cgrp;
down_write(&env->cgroups.lock);
while (!RB_EMPTY_ROOT(&env->cgroups.tree)) {
node = rb_first(&env->cgroups.tree);
cgrp = rb_entry(node, struct cgroup, node);
rb_erase(node, &env->cgroups.tree);
cgroup__put(cgrp);
}
up_write(&env->cgroups.lock);
}
| linux-master | tools/perf/util/cgroup.c |
#include "dso.h"
#include "symbol.h"
#include "symsrc.h"
#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <byteswap.h>
#include <sys/stat.h>
#include <linux/zalloc.h>
#include <internal/lib.h>
static bool check_need_swap(int file_endian)
{
const int data = 1;
u8 *check = (u8 *)&data;
int host_endian;
if (check[0] == 1)
host_endian = ELFDATA2LSB;
else
host_endian = ELFDATA2MSB;
return host_endian != file_endian;
}
#define NOTE_ALIGN(sz) (((sz) + 3) & ~3)
#define NT_GNU_BUILD_ID 3
static int read_build_id(void *note_data, size_t note_len, struct build_id *bid,
bool need_swap)
{
size_t size = sizeof(bid->data);
struct {
u32 n_namesz;
u32 n_descsz;
u32 n_type;
} *nhdr;
void *ptr;
ptr = note_data;
while (ptr < (note_data + note_len)) {
const char *name;
size_t namesz, descsz;
nhdr = ptr;
if (need_swap) {
nhdr->n_namesz = bswap_32(nhdr->n_namesz);
nhdr->n_descsz = bswap_32(nhdr->n_descsz);
nhdr->n_type = bswap_32(nhdr->n_type);
}
namesz = NOTE_ALIGN(nhdr->n_namesz);
descsz = NOTE_ALIGN(nhdr->n_descsz);
ptr += sizeof(*nhdr);
name = ptr;
ptr += namesz;
if (nhdr->n_type == NT_GNU_BUILD_ID &&
nhdr->n_namesz == sizeof("GNU")) {
if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
size_t sz = min(size, descsz);
memcpy(bid->data, ptr, sz);
memset(bid->data + sz, 0, size - sz);
bid->size = sz;
return 0;
}
}
ptr += descsz;
}
return -1;
}
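/*
 * Layout assumed by the loop above for each note entry (all sizes padded to
 * 4 bytes via NOTE_ALIGN):
 *
 *   | n_namesz | n_descsz | n_type | name (padded) | desc (padded) |
 *
 * For a GNU build-id note, n_namesz is sizeof("GNU") == 4, n_type is
 * NT_GNU_BUILD_ID and desc carries the build-id bytes (typically 20 for
 * SHA-1).
 */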
int filename__read_debuglink(const char *filename __maybe_unused,
char *debuglink __maybe_unused,
size_t size __maybe_unused)
{
return -1;
}
/*
* Just try the PT_NOTE headers, otherwise fail
*/
int filename__read_build_id(const char *filename, struct build_id *bid)
{
FILE *fp;
int ret = -1;
bool need_swap = false;
u8 e_ident[EI_NIDENT];
size_t buf_size;
void *buf;
int i;
fp = fopen(filename, "r");
if (fp == NULL)
return -1;
if (fread(e_ident, sizeof(e_ident), 1, fp) != 1)
goto out;
if (memcmp(e_ident, ELFMAG, SELFMAG) ||
e_ident[EI_VERSION] != EV_CURRENT)
goto out;
need_swap = check_need_swap(e_ident[EI_DATA]);
/* for simplicity */
fseek(fp, 0, SEEK_SET);
if (e_ident[EI_CLASS] == ELFCLASS32) {
Elf32_Ehdr ehdr;
Elf32_Phdr *phdr;
if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
goto out;
if (need_swap) {
ehdr.e_phoff = bswap_32(ehdr.e_phoff);
ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
ehdr.e_phnum = bswap_16(ehdr.e_phnum);
}
buf_size = ehdr.e_phentsize * ehdr.e_phnum;
buf = malloc(buf_size);
if (buf == NULL)
goto out;
fseek(fp, ehdr.e_phoff, SEEK_SET);
if (fread(buf, buf_size, 1, fp) != 1)
goto out_free;
for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
void *tmp;
long offset;
if (need_swap) {
phdr->p_type = bswap_32(phdr->p_type);
phdr->p_offset = bswap_32(phdr->p_offset);
phdr->p_filesz = bswap_32(phdr->p_filesz);
}
if (phdr->p_type != PT_NOTE)
continue;
buf_size = phdr->p_filesz;
offset = phdr->p_offset;
tmp = realloc(buf, buf_size);
if (tmp == NULL)
goto out_free;
buf = tmp;
fseek(fp, offset, SEEK_SET);
if (fread(buf, buf_size, 1, fp) != 1)
goto out_free;
ret = read_build_id(buf, buf_size, bid, need_swap);
if (ret == 0)
ret = bid->size;
break;
}
} else {
Elf64_Ehdr ehdr;
Elf64_Phdr *phdr;
if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
goto out;
if (need_swap) {
ehdr.e_phoff = bswap_64(ehdr.e_phoff);
ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
ehdr.e_phnum = bswap_16(ehdr.e_phnum);
}
buf_size = ehdr.e_phentsize * ehdr.e_phnum;
buf = malloc(buf_size);
if (buf == NULL)
goto out;
fseek(fp, ehdr.e_phoff, SEEK_SET);
if (fread(buf, buf_size, 1, fp) != 1)
goto out_free;
for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
void *tmp;
long offset;
if (need_swap) {
phdr->p_type = bswap_32(phdr->p_type);
phdr->p_offset = bswap_64(phdr->p_offset);
phdr->p_filesz = bswap_64(phdr->p_filesz);
}
if (phdr->p_type != PT_NOTE)
continue;
buf_size = phdr->p_filesz;
offset = phdr->p_offset;
tmp = realloc(buf, buf_size);
if (tmp == NULL)
goto out_free;
buf = tmp;
fseek(fp, offset, SEEK_SET);
if (fread(buf, buf_size, 1, fp) != 1)
goto out_free;
ret = read_build_id(buf, buf_size, bid, need_swap);
if (ret == 0)
ret = bid->size;
break;
}
}
out_free:
free(buf);
out:
fclose(fp);
return ret;
}
int sysfs__read_build_id(const char *filename, struct build_id *bid)
{
int fd;
int ret = -1;
struct stat stbuf;
size_t buf_size;
void *buf;
fd = open(filename, O_RDONLY);
if (fd < 0)
return -1;
if (fstat(fd, &stbuf) < 0)
goto out;
buf_size = stbuf.st_size;
buf = malloc(buf_size);
if (buf == NULL)
goto out;
if (read(fd, buf, buf_size) != (ssize_t) buf_size)
goto out_free;
ret = read_build_id(buf, buf_size, bid, false);
out_free:
free(buf);
out:
close(fd);
return ret;
}
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
enum dso_binary_type type)
{
int fd = open(name, O_RDONLY);
if (fd < 0)
goto out_errno;
ss->name = strdup(name);
if (!ss->name)
goto out_close;
ss->fd = fd;
ss->type = type;
return 0;
out_close:
close(fd);
out_errno:
dso->load_errno = errno;
return -1;
}
bool symsrc__possibly_runtime(struct symsrc *ss __maybe_unused)
{
/* Assume all sym sources could be a runtime image. */
return true;
}
bool symsrc__has_symtab(struct symsrc *ss __maybe_unused)
{
return false;
}
void symsrc__destroy(struct symsrc *ss)
{
zfree(&ss->name);
close(ss->fd);
}
int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused,
struct symsrc *ss __maybe_unused)
{
return 0;
}
static int fd__is_64_bit(int fd)
{
u8 e_ident[EI_NIDENT];
if (lseek(fd, 0, SEEK_SET))
return -1;
if (readn(fd, e_ident, sizeof(e_ident)) != sizeof(e_ident))
return -1;
if (memcmp(e_ident, ELFMAG, SELFMAG) ||
e_ident[EI_VERSION] != EV_CURRENT)
return -1;
return e_ident[EI_CLASS] == ELFCLASS64;
}
enum dso_type dso__type_fd(int fd)
{
Elf64_Ehdr ehdr;
int ret;
ret = fd__is_64_bit(fd);
if (ret < 0)
return DSO__TYPE_UNKNOWN;
if (ret)
return DSO__TYPE_64BIT;
if (readn(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr))
return DSO__TYPE_UNKNOWN;
if (ehdr.e_machine == EM_X86_64)
return DSO__TYPE_X32BIT;
return DSO__TYPE_32BIT;
}
int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
struct symsrc *ss,
struct symsrc *runtime_ss __maybe_unused,
int kmodule __maybe_unused)
{
struct build_id bid;
int ret;
ret = fd__is_64_bit(ss->fd);
if (ret >= 0)
dso->is_64_bit = ret;
if (filename__read_build_id(ss->name, &bid) > 0)
dso__set_build_id(dso, &bid);
return 0;
}
int file__read_maps(int fd __maybe_unused, bool exe __maybe_unused,
mapfn_t mapfn __maybe_unused, void *data __maybe_unused,
bool *is_64_bit __maybe_unused)
{
return -1;
}
int kcore_extract__create(struct kcore_extract *kce __maybe_unused)
{
return -1;
}
void kcore_extract__delete(struct kcore_extract *kce __maybe_unused)
{
}
int kcore_copy(const char *from_dir __maybe_unused,
const char *to_dir __maybe_unused)
{
return -1;
}
void symbol__elf_init(void)
{
}
char *dso__demangle_sym(struct dso *dso __maybe_unused,
int kmodule __maybe_unused,
const char *elf_name __maybe_unused)
{
return NULL;
}
bool filename__has_section(const char *filename __maybe_unused, const char *sec __maybe_unused)
{
return false;
}
| linux-master | tools/perf/util/symbol-minimal.c |
// SPDX-License-Identifier: GPL-2.0
#include <math.h>
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "debug.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
#include "cgroup.h"
#include "units.h"
#include <linux/zalloc.h>
#include "iostat.h"
#include "util/hashmap.h"
struct stats walltime_nsecs_stats;
struct rusage_stats ru_stats;
enum {
CTX_BIT_USER = 1 << 0,
CTX_BIT_KERNEL = 1 << 1,
CTX_BIT_HV = 1 << 2,
CTX_BIT_HOST = 1 << 3,
CTX_BIT_IDLE = 1 << 4,
CTX_BIT_MAX = 1 << 5,
};
enum stat_type {
STAT_NONE = 0,
STAT_NSECS,
STAT_CYCLES,
STAT_INSTRUCTIONS,
STAT_STALLED_CYCLES_FRONT,
STAT_STALLED_CYCLES_BACK,
STAT_BRANCHES,
STAT_BRANCH_MISS,
STAT_CACHE_REFS,
STAT_CACHE_MISSES,
STAT_L1_DCACHE,
STAT_L1_ICACHE,
STAT_LL_CACHE,
STAT_ITLB_CACHE,
STAT_DTLB_CACHE,
STAT_L1D_MISS,
STAT_L1I_MISS,
STAT_LL_MISS,
STAT_DTLB_MISS,
STAT_ITLB_MISS,
STAT_MAX
};
static int evsel_context(const struct evsel *evsel)
{
int ctx = 0;
if (evsel->core.attr.exclude_kernel)
ctx |= CTX_BIT_KERNEL;
if (evsel->core.attr.exclude_user)
ctx |= CTX_BIT_USER;
if (evsel->core.attr.exclude_hv)
ctx |= CTX_BIT_HV;
if (evsel->core.attr.exclude_host)
ctx |= CTX_BIT_HOST;
if (evsel->core.attr.exclude_idle)
ctx |= CTX_BIT_IDLE;
return ctx;
}
void perf_stat__reset_shadow_stats(void)
{
memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
memset(&ru_stats, 0, sizeof(ru_stats));
}
static enum stat_type evsel__stat_type(const struct evsel *evsel)
{
/* Fake perf_hw_cache_op_id values for use with evsel__match. */
u64 PERF_COUNT_hw_cache_l1d_miss = PERF_COUNT_HW_CACHE_L1D |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
u64 PERF_COUNT_hw_cache_l1i_miss = PERF_COUNT_HW_CACHE_L1I |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
u64 PERF_COUNT_hw_cache_ll_miss = PERF_COUNT_HW_CACHE_LL |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
u64 PERF_COUNT_hw_cache_dtlb_miss = PERF_COUNT_HW_CACHE_DTLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
u64 PERF_COUNT_hw_cache_itlb_miss = PERF_COUNT_HW_CACHE_ITLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
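/*
 * These fake values follow the standard perf cache event encoding,
 * config = id | (op << 8) | (result << 16).  For example, assuming the
 * usual enum values, an L1D read miss is 0 | (0 << 8) | (1 << 16),
 * i.e. 0x10000.
 */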
if (evsel__is_clock(evsel))
return STAT_NSECS;
else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES))
return STAT_CYCLES;
else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS))
return STAT_INSTRUCTIONS;
else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
return STAT_STALLED_CYCLES_FRONT;
else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND))
return STAT_STALLED_CYCLES_BACK;
else if (evsel__match(evsel, HARDWARE, HW_BRANCH_INSTRUCTIONS))
return STAT_BRANCHES;
else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES))
return STAT_BRANCH_MISS;
else if (evsel__match(evsel, HARDWARE, HW_CACHE_REFERENCES))
return STAT_CACHE_REFS;
else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES))
return STAT_CACHE_MISSES;
else if (evsel__match(evsel, HW_CACHE, HW_CACHE_L1D))
return STAT_L1_DCACHE;
else if (evsel__match(evsel, HW_CACHE, HW_CACHE_L1I))
return STAT_L1_ICACHE;
else if (evsel__match(evsel, HW_CACHE, HW_CACHE_LL))
return STAT_LL_CACHE;
else if (evsel__match(evsel, HW_CACHE, HW_CACHE_DTLB))
return STAT_DTLB_CACHE;
else if (evsel__match(evsel, HW_CACHE, HW_CACHE_ITLB))
return STAT_ITLB_CACHE;
else if (evsel__match(evsel, HW_CACHE, hw_cache_l1d_miss))
return STAT_L1D_MISS;
else if (evsel__match(evsel, HW_CACHE, hw_cache_l1i_miss))
return STAT_L1I_MISS;
else if (evsel__match(evsel, HW_CACHE, hw_cache_ll_miss))
return STAT_LL_MISS;
else if (evsel__match(evsel, HW_CACHE, hw_cache_dtlb_miss))
return STAT_DTLB_MISS;
else if (evsel__match(evsel, HW_CACHE, hw_cache_itlb_miss))
return STAT_ITLB_MISS;
return STAT_NONE;
}
static const char *get_ratio_color(const double ratios[3], double val)
{
const char *color = PERF_COLOR_NORMAL;
if (val > ratios[0])
color = PERF_COLOR_RED;
else if (val > ratios[1])
color = PERF_COLOR_MAGENTA;
else if (val > ratios[2])
color = PERF_COLOR_YELLOW;
return color;
}
static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type type)
{
const struct evsel *cur;
int evsel_ctx = evsel_context(evsel);
evlist__for_each_entry(evsel->evlist, cur) {
struct perf_stat_aggr *aggr;
/* Ignore the evsel that is being searched from. */
if (evsel == cur)
continue;
/* Ignore evsels that are part of different groups. */
if (evsel->core.leader->nr_members > 1 &&
evsel->core.leader != cur->core.leader)
continue;
/* Ignore evsels with mismatched modifiers. */
if (evsel_ctx != evsel_context(cur))
continue;
/* Ignore if not the cgroup we're looking for. */
if (evsel->cgrp != cur->cgrp)
continue;
/* Ignore if not the stat we're looking for. */
if (type != evsel__stat_type(cur))
continue;
aggr = &cur->stats->aggr[aggr_idx];
if (type == STAT_NSECS)
return aggr->counts.val;
return aggr->counts.val * cur->scale;
}
return 0.0;
}
static void print_ratio(struct perf_stat_config *config,
const struct evsel *evsel, int aggr_idx,
double numerator, struct perf_stat_output_ctx *out,
enum stat_type denominator_type,
const double color_ratios[3], const char *unit)
{
double denominator = find_stat(evsel, aggr_idx, denominator_type);
if (numerator && denominator) {
double ratio = numerator / denominator * 100.0;
const char *color = get_ratio_color(color_ratios, ratio);
out->print_metric(config, out->ctx, color, "%7.2f%%", unit, ratio);
} else
out->print_metric(config, out->ctx, NULL, NULL, unit, 0);
}
static void print_stalled_cycles_front(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double stalled,
struct perf_stat_output_ctx *out)
{
static const double color_ratios[3] = {50.0, 30.0, 10.0};
print_ratio(config, evsel, aggr_idx, stalled, out, STAT_CYCLES, color_ratios,
"frontend cycles idle");
}
static void print_stalled_cycles_back(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double stalled,
struct perf_stat_output_ctx *out)
{
static const double color_ratios[3] = {75.0, 50.0, 20.0};
print_ratio(config, evsel, aggr_idx, stalled, out, STAT_CYCLES, color_ratios,
"backend cycles idle");
}
static void print_branch_miss(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double misses,
struct perf_stat_output_ctx *out)
{
static const double color_ratios[3] = {20.0, 10.0, 5.0};
print_ratio(config, evsel, aggr_idx, misses, out, STAT_BRANCHES, color_ratios,
"of all branches");
}
static void print_l1d_miss(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double misses,
struct perf_stat_output_ctx *out)
{
static const double color_ratios[3] = {20.0, 10.0, 5.0};
print_ratio(config, evsel, aggr_idx, misses, out, STAT_L1_DCACHE, color_ratios,
"of all L1-dcache accesses");
}
static void print_l1i_miss(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double misses,
struct perf_stat_output_ctx *out)
{
static const double color_ratios[3] = {20.0, 10.0, 5.0};
print_ratio(config, evsel, aggr_idx, misses, out, STAT_L1_ICACHE, color_ratios,
"of all L1-icache accesses");
}
static void print_ll_miss(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double misses,
struct perf_stat_output_ctx *out)
{
static const double color_ratios[3] = {20.0, 10.0, 5.0};
print_ratio(config, evsel, aggr_idx, misses, out, STAT_LL_CACHE, color_ratios,
"of all L1-icache accesses");
}
static void print_dtlb_miss(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double misses,
struct perf_stat_output_ctx *out)
{
static const double color_ratios[3] = {20.0, 10.0, 5.0};
print_ratio(config, evsel, aggr_idx, misses, out, STAT_DTLB_CACHE, color_ratios,
"of all dTLB cache accesses");
}
static void print_itlb_miss(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double misses,
struct perf_stat_output_ctx *out)
{
static const double color_ratios[3] = {20.0, 10.0, 5.0};
print_ratio(config, evsel, aggr_idx, misses, out, STAT_ITLB_CACHE, color_ratios,
"of all iTLB cache accesses");
}
static void print_cache_miss(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double misses,
struct perf_stat_output_ctx *out)
{
static const double color_ratios[3] = {20.0, 10.0, 5.0};
print_ratio(config, evsel, aggr_idx, misses, out, STAT_CACHE_REFS, color_ratios,
"of all cache refs");
}
static void print_instructions(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double instructions,
struct perf_stat_output_ctx *out)
{
print_metric_t print_metric = out->print_metric;
void *ctxp = out->ctx;
double cycles = find_stat(evsel, aggr_idx, STAT_CYCLES);
double max_stalled = max(find_stat(evsel, aggr_idx, STAT_STALLED_CYCLES_FRONT),
find_stat(evsel, aggr_idx, STAT_STALLED_CYCLES_BACK));
if (cycles) {
print_metric(config, ctxp, NULL, "%7.2f ", "insn per cycle",
instructions / cycles);
} else
print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
if (max_stalled && instructions) {
out->new_line(config, ctxp);
print_metric(config, ctxp, NULL, "%7.2f ", "stalled cycles per insn",
max_stalled / instructions);
}
}
static void print_cycles(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double cycles,
struct perf_stat_output_ctx *out)
{
double nsecs = find_stat(evsel, aggr_idx, STAT_NSECS);
if (cycles && nsecs) {
double ratio = cycles / nsecs;
out->print_metric(config, out->ctx, NULL, "%8.3f", "GHz", ratio);
} else
out->print_metric(config, out->ctx, NULL, NULL, "GHz", 0);
}
static void print_nsecs(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx __maybe_unused, double nsecs,
struct perf_stat_output_ctx *out)
{
print_metric_t print_metric = out->print_metric;
void *ctxp = out->ctx;
double wall_time = avg_stats(&walltime_nsecs_stats);
if (wall_time) {
print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
nsecs / (wall_time * evsel->scale));
} else
print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
}
static int prepare_metric(struct evsel **metric_events,
struct metric_ref *metric_refs,
struct expr_parse_ctx *pctx,
int aggr_idx)
{
int i;
for (i = 0; metric_events[i]; i++) {
char *n;
double val;
int source_count = 0;
if (evsel__is_tool(metric_events[i])) {
struct stats *stats;
double scale;
switch (metric_events[i]->tool_event) {
case PERF_TOOL_DURATION_TIME:
stats = &walltime_nsecs_stats;
scale = 1e-9;
break;
case PERF_TOOL_USER_TIME:
stats = &ru_stats.ru_utime_usec_stat;
scale = 1e-6;
break;
case PERF_TOOL_SYSTEM_TIME:
stats = &ru_stats.ru_stime_usec_stat;
scale = 1e-6;
break;
case PERF_TOOL_NONE:
pr_err("Invalid tool event 'none'");
abort();
case PERF_TOOL_MAX:
pr_err("Invalid tool event 'max'");
abort();
default:
pr_err("Unknown tool event '%s'", evsel__name(metric_events[i]));
abort();
}
val = avg_stats(stats) * scale;
source_count = 1;
} else {
struct perf_stat_evsel *ps = metric_events[i]->stats;
struct perf_stat_aggr *aggr = &ps->aggr[aggr_idx];
if (!aggr)
break;
if (!metric_events[i]->supported) {
/*
* Not supported events will have a count of 0,
* which can be confusing in a
* metric. Explicitly set the value to NAN. Not
* counted events (enable time of 0) are read as
* 0.
*/
val = NAN;
source_count = 0;
} else {
/*
* If an event was scaled during stat gathering,
* reverse the scale before computing the
* metric.
*/
val = aggr->counts.val * (1.0 / metric_events[i]->scale);
source_count = evsel__source_count(metric_events[i]);
}
}
n = strdup(evsel__metric_id(metric_events[i]));
if (!n)
return -ENOMEM;
expr__add_id_val_source_count(pctx, n, val, source_count);
}
for (int j = 0; metric_refs && metric_refs[j].metric_name; j++) {
int ret = expr__add_ref(pctx, &metric_refs[j]);
if (ret)
return ret;
}
return i;
}
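/*
 * A hypothetical example: for a metric expression such as
 * "instructions / cycles", prepare_metric() adds one id per metric event
 * ("instructions" = <count>, "cycles" = <count>) plus any referenced
 * sub-metrics, and generic_metric() below hands the populated context to
 * expr__parse() to compute the final ratio.
 */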
static void generic_metric(struct perf_stat_config *config,
const char *metric_expr,
const char *metric_threshold,
struct evsel **metric_events,
struct metric_ref *metric_refs,
char *name,
const char *metric_name,
const char *metric_unit,
int runtime,
int aggr_idx,
struct perf_stat_output_ctx *out)
{
print_metric_t print_metric = out->print_metric;
struct expr_parse_ctx *pctx;
double ratio, scale, threshold;
int i;
void *ctxp = out->ctx;
const char *color = NULL;
pctx = expr__ctx_new();
if (!pctx)
return;
if (config->user_requested_cpu_list)
pctx->sctx.user_requested_cpu_list = strdup(config->user_requested_cpu_list);
pctx->sctx.runtime = runtime;
pctx->sctx.system_wide = config->system_wide;
i = prepare_metric(metric_events, metric_refs, pctx, aggr_idx);
if (i < 0) {
expr__ctx_free(pctx);
return;
}
if (!metric_events[i]) {
if (expr__parse(&ratio, pctx, metric_expr) == 0) {
char *unit;
char metric_bf[64];
if (metric_threshold &&
expr__parse(&threshold, pctx, metric_threshold) == 0 &&
!isnan(threshold)) {
color = fpclassify(threshold) == FP_ZERO
? PERF_COLOR_GREEN : PERF_COLOR_RED;
}
if (metric_unit && metric_name) {
if (perf_pmu__convert_scale(metric_unit,
&unit, &scale) >= 0) {
ratio *= scale;
}
if (strstr(metric_expr, "?"))
scnprintf(metric_bf, sizeof(metric_bf),
"%s %s_%d", unit, metric_name, runtime);
else
scnprintf(metric_bf, sizeof(metric_bf),
"%s %s", unit, metric_name);
print_metric(config, ctxp, color, "%8.1f",
metric_bf, ratio);
} else {
print_metric(config, ctxp, color, "%8.2f",
metric_name ?
metric_name :
out->force_header ? name : "",
ratio);
}
} else {
print_metric(config, ctxp, color, /*unit=*/NULL,
out->force_header ?
(metric_name ? metric_name : name) : "", 0);
}
} else {
print_metric(config, ctxp, color, /*unit=*/NULL,
out->force_header ?
(metric_name ? metric_name : name) : "", 0);
}
expr__ctx_free(pctx);
}
double test_generic_metric(struct metric_expr *mexp, int aggr_idx)
{
struct expr_parse_ctx *pctx;
double ratio = 0.0;
pctx = expr__ctx_new();
if (!pctx)
return NAN;
if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, aggr_idx) < 0)
goto out;
if (expr__parse(&ratio, pctx, mexp->metric_expr))
ratio = 0.0;
out:
expr__ctx_free(pctx);
return ratio;
}
static void perf_stat__print_metricgroup_header(struct perf_stat_config *config,
struct evsel *evsel,
void *ctxp,
const char *name,
struct perf_stat_output_ctx *out)
{
bool need_full_name = perf_pmus__num_core_pmus() > 1;
static const char *last_name;
static const char *last_pmu;
char full_name[64];
/*
* A metricgroup may have several metric events,
* e.g., TopdownL1 on the e-core of ADL.
* The name has been output by the first metric
* event. Only align with other metrics from
* different metric events.
*/
if (last_name && !strcmp(last_name, name)) {
if (!need_full_name || !strcmp(last_pmu, evsel->pmu_name)) {
out->print_metricgroup_header(config, ctxp, NULL);
return;
}
}
if (need_full_name)
scnprintf(full_name, sizeof(full_name), "%s (%s)", name, evsel->pmu_name);
else
scnprintf(full_name, sizeof(full_name), "%s", name);
out->print_metricgroup_header(config, ctxp, full_name);
last_name = name;
last_pmu = evsel->pmu_name;
}
/**
* perf_stat__print_shadow_stats_metricgroup - Print out metrics associated with the evsel
* For the non-default mode, all metrics associated
* with the evsel are printed.
* For the default mode, only the metrics from
* the same metricgroup and the name of the
* metricgroup are printed. To print the metrics
* from the next metricgroup (if available),
* invoke the function with the corresponding
* metric_expr.
*/
void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config,
struct evsel *evsel,
int aggr_idx,
int *num,
void *from,
struct perf_stat_output_ctx *out,
struct rblist *metric_events)
{
struct metric_event *me;
struct metric_expr *mexp = from;
void *ctxp = out->ctx;
bool header_printed = false;
const char *name = NULL;
me = metricgroup__lookup(metric_events, evsel, false);
if (me == NULL)
return NULL;
if (!mexp)
mexp = list_first_entry(&me->head, typeof(*mexp), nd);
list_for_each_entry_from(mexp, &me->head, nd) {
/* Print the display name of the Default metricgroup */
if (!config->metric_only && me->is_default) {
if (!name)
name = mexp->default_metricgroup_name;
/*
* Two or more metricgroups may share the same metric
* event, e.g., TopdownL1 and TopdownL2 on SPR.
* Return and print the prefix, e.g., noise, running
* for the next metricgroup.
*/
if (strcmp(name, mexp->default_metricgroup_name))
return (void *)mexp;
/* Only print the name of the metricgroup once */
if (!header_printed) {
header_printed = true;
perf_stat__print_metricgroup_header(config, evsel, ctxp,
name, out);
}
}
if ((*num)++ > 0)
out->new_line(config, ctxp);
generic_metric(config, mexp->metric_expr, mexp->metric_threshold,
mexp->metric_events, mexp->metric_refs, evsel->name,
mexp->metric_name, mexp->metric_unit, mexp->runtime,
aggr_idx, out);
}
return NULL;
}
void perf_stat__print_shadow_stats(struct perf_stat_config *config,
struct evsel *evsel,
double avg, int aggr_idx,
struct perf_stat_output_ctx *out,
struct rblist *metric_events)
{
typedef void (*stat_print_function_t)(struct perf_stat_config *config,
const struct evsel *evsel,
int aggr_idx, double misses,
struct perf_stat_output_ctx *out);
static const stat_print_function_t stat_print_function[STAT_MAX] = {
[STAT_INSTRUCTIONS] = print_instructions,
[STAT_BRANCH_MISS] = print_branch_miss,
[STAT_L1D_MISS] = print_l1d_miss,
[STAT_L1I_MISS] = print_l1i_miss,
[STAT_DTLB_MISS] = print_dtlb_miss,
[STAT_ITLB_MISS] = print_itlb_miss,
[STAT_LL_MISS] = print_ll_miss,
[STAT_CACHE_MISSES] = print_cache_miss,
[STAT_STALLED_CYCLES_FRONT] = print_stalled_cycles_front,
[STAT_STALLED_CYCLES_BACK] = print_stalled_cycles_back,
[STAT_CYCLES] = print_cycles,
[STAT_NSECS] = print_nsecs,
};
print_metric_t print_metric = out->print_metric;
void *ctxp = out->ctx;
int num = 1;
if (config->iostat_run) {
iostat_print_metric(config, evsel, out);
} else {
stat_print_function_t fn = stat_print_function[evsel__stat_type(evsel)];
if (fn)
fn(config, evsel, aggr_idx, avg, out);
else {
double nsecs = find_stat(evsel, aggr_idx, STAT_NSECS);
if (nsecs) {
char unit = ' ';
char unit_buf[10] = "/sec";
double ratio = convert_unit_double(1000000000.0 * avg / nsecs,
&unit);
if (unit != ' ')
snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
} else
num = 0;
}
}
perf_stat__print_shadow_stats_metricgroup(config, evsel, aggr_idx,
&num, NULL, out, metric_events);
if (num == 0)
print_metric(config, ctxp, NULL, NULL, NULL, 0);
}
/**
* perf_stat__skip_metric_event - Skip the evsel in the Default metricgroup,
* if it's not running or not the metric event.
*/
bool perf_stat__skip_metric_event(struct evsel *evsel,
struct rblist *metric_events,
u64 ena, u64 run)
{
if (!evsel->default_metricgroup)
return false;
if (!ena || !run)
return true;
return !metricgroup__lookup(metric_events, evsel, false);
}
| linux-master | tools/perf/util/stat-shadow.c |
// SPDX-License-Identifier: GPL-2.0
/* Manage affinity to optimize IPIs inside the kernel perf API. */
#define _GNU_SOURCE 1
#include <sched.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/zalloc.h>
#include "perf.h"
#include "cpumap.h"
#include "affinity.h"
static int get_cpu_set_size(void)
{
int sz = cpu__max_cpu().cpu + 8 - 1;
/*
* sched_getaffinity doesn't like masks smaller than the kernel's cpumask
* size. Hopefully 4096 bits is big enough.
*/
if (sz < 4096)
sz = 4096;
return sz / 8;
}
int affinity__setup(struct affinity *a)
{
int cpu_set_size = get_cpu_set_size();
a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
if (!a->orig_cpus)
return -1;
sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
if (!a->sched_cpus) {
zfree(&a->orig_cpus);
return -1;
}
bitmap_zero((unsigned long *)a->sched_cpus, cpu_set_size);
a->changed = false;
return 0;
}
/*
* perf_event_open does an IPI internally to the target CPU.
* It is more efficient to change perf's affinity to the target
* CPU and then set up all events on that CPU, so we amortize
* CPU communication.
*/
void affinity__set(struct affinity *a, int cpu)
{
int cpu_set_size = get_cpu_set_size();
/*
* Return early:
* - if cpu is -1
* - to avoid out-of-bounds access to sched_cpus
*/
if (cpu == -1 || ((cpu >= (cpu_set_size * 8))))
return;
a->changed = true;
__set_bit(cpu, a->sched_cpus);
/*
* We ignore errors because affinity is just an optimization.
* This could happen for example with isolated CPUs or cpusets.
* In this case the IPIs inside the kernel's perf API still work.
*/
sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
__clear_bit(cpu, a->sched_cpus);
}
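/*
 * A minimal usage sketch (hypothetical loop; see the evlist code for the
 * real callers): group all per-CPU work so the thread only migrates once
 * per CPU:
 *
 *   struct affinity affinity;
 *
 *   if (affinity__setup(&affinity) < 0)
 *           return;
 *   // for each cpu of interest:
 *   //         affinity__set(&affinity, cpu);
 *   //         ... open/enable/close the events bound to this cpu ...
 *   affinity__cleanup(&affinity);
 */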
static void __affinity__cleanup(struct affinity *a)
{
int cpu_set_size = get_cpu_set_size();
if (a->changed)
sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
zfree(&a->sched_cpus);
zfree(&a->orig_cpus);
}
void affinity__cleanup(struct affinity *a)
{
if (a != NULL)
__affinity__cleanup(a);
}
| linux-master | tools/perf/util/affinity.c |
// SPDX-License-Identifier: GPL-2.0
#include <api/fs/fs.h>
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include <internal/cpumap.h>
static struct perf_cpu max_cpu_num;
static struct perf_cpu max_present_cpu_num;
static int max_node_num;
/**
* The numa node X as read from /sys/devices/system/node/nodeX indexed by the
* CPU number.
*/
static int *cpunode_map;
bool perf_record_cpu_map_data__test_bit(int i,
const struct perf_record_cpu_map_data *data)
{
int bit_word32 = i / 32;
__u32 bit_mask32 = 1U << (i & 31);
int bit_word64 = i / 64;
__u64 bit_mask64 = ((__u64)1) << (i & 63);
return (data->mask32_data.long_size == 4)
? (bit_word32 < data->mask32_data.nr) &&
(data->mask32_data.mask[bit_word32] & bit_mask32) != 0
: (bit_word64 < data->mask64_data.nr) &&
(data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
}
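/*
 * A worked example: with 64-bit mask words, testing bit i = 70 reads word
 * 70 / 64 = 1 and bit 1ULL << (70 & 63) = 1ULL << 6; with 32-bit words the
 * same bit lives in word 70 / 32 = 2 at 1U << (70 & 31) = 1U << 6.
 */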
/* Read ith mask value from data into the given 64-bit sized bitmap */
static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
int i, unsigned long *bitmap)
{
#if __SIZEOF_LONG__ == 8
if (data->mask32_data.long_size == 4)
bitmap[0] = data->mask32_data.mask[i];
else
bitmap[0] = data->mask64_data.mask[i];
#else
if (data->mask32_data.long_size == 4) {
bitmap[0] = data->mask32_data.mask[i];
bitmap[1] = 0;
} else {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
bitmap[1] = (unsigned long)data->mask64_data.mask[i];
#else
bitmap[0] = (unsigned long)data->mask64_data.mask[i];
bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
#endif
}
#endif
}
static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
{
struct perf_cpu_map *map;
map = perf_cpu_map__empty_new(data->cpus_data.nr);
if (map) {
unsigned i;
for (i = 0; i < data->cpus_data.nr; i++) {
/*
* Special treatment for -1, which is not a real cpu number,
* and we need to use (int) -1 to initialize map[i],
* otherwise it would become 65535.
*/
if (data->cpus_data.cpu[i] == (u16) -1)
RC_CHK_ACCESS(map)->map[i].cpu = -1;
else
RC_CHK_ACCESS(map)->map[i].cpu = (int) data->cpus_data.cpu[i];
}
}
return map;
}
static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
{
DECLARE_BITMAP(local_copy, 64);
int weight = 0, mask_nr = data->mask32_data.nr;
struct perf_cpu_map *map;
for (int i = 0; i < mask_nr; i++) {
perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
weight += bitmap_weight(local_copy, 64);
}
map = perf_cpu_map__empty_new(weight);
if (!map)
return NULL;
for (int i = 0, j = 0; i < mask_nr; i++) {
int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE);
int cpu;
perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
for_each_set_bit(cpu, local_copy, 64)
RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i;
}
return map;
}
static struct perf_cpu_map *cpu_map__from_range(const struct perf_record_cpu_map_data *data)
{
struct perf_cpu_map *map;
unsigned int i = 0;
map = perf_cpu_map__empty_new(data->range_cpu_data.end_cpu -
data->range_cpu_data.start_cpu + 1 + data->range_cpu_data.any_cpu);
if (!map)
return NULL;
if (data->range_cpu_data.any_cpu)
RC_CHK_ACCESS(map)->map[i++].cpu = -1;
for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu;
i++, cpu++)
RC_CHK_ACCESS(map)->map[i].cpu = cpu;
return map;
}
struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
{
switch (data->type) {
case PERF_CPU_MAP__CPUS:
return cpu_map__from_entries(data);
case PERF_CPU_MAP__MASK:
return cpu_map__from_mask(data);
case PERF_CPU_MAP__RANGE_CPUS:
return cpu_map__from_range(data);
default:
pr_err("cpu_map__new_data unknown type %d\n", data->type);
return NULL;
}
}
size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
char buf[BUFSIZE];
cpu_map__snprint(map, buf, sizeof(buf));
return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}
struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr);
if (cpus != NULL) {
for (int i = 0; i < nr; i++)
RC_CHK_ACCESS(cpus)->map[i].cpu = -1;
}
return cpus;
}
struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
{
struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr);
if (cpus != NULL) {
int i;
cpus->nr = nr;
for (i = 0; i < nr; i++)
cpus->map[i] = aggr_cpu_id__empty();
refcount_set(&cpus->refcnt, 1);
}
return cpus;
}
static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
char path[PATH_MAX];
snprintf(path, PATH_MAX,
"devices/system/cpu/cpu%d/topology/%s", cpu, name);
return sysfs__read_int(path, value);
}
int cpu__get_socket_id(struct perf_cpu cpu)
{
int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value);
return ret ?: value;
}
struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
{
struct aggr_cpu_id id = aggr_cpu_id__empty();
id.socket = cpu__get_socket_id(cpu);
return id;
}
static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
{
struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer;
struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer;
if (a->node != b->node)
return a->node - b->node;
else if (a->socket != b->socket)
return a->socket - b->socket;
else if (a->die != b->die)
return a->die - b->die;
else if (a->cache_lvl != b->cache_lvl)
return a->cache_lvl - b->cache_lvl;
else if (a->cache != b->cache)
return a->cache - b->cache;
else if (a->core != b->core)
return a->core - b->core;
else
return a->thread_idx - b->thread_idx;
}
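/*
* The comparison above orders aggregation IDs lexicographically by
* node, socket, die, cache level, cache, core and finally thread
* index, which is the order used when qsort()-ing the map in
* cpu_aggr_map__new().
*/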
struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
aggr_cpu_id_get_t get_id,
void *data, bool needs_sort)
{
int idx;
struct perf_cpu cpu;
struct cpu_aggr_map *c = cpu_aggr_map__empty_new(perf_cpu_map__nr(cpus));
if (!c)
return NULL;
/* Reset size as it may only be partially filled */
c->nr = 0;
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
bool duplicate = false;
struct aggr_cpu_id cpu_id = get_id(cpu, data);
for (int j = 0; j < c->nr; j++) {
if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
duplicate = true;
break;
}
}
if (!duplicate) {
c->map[c->nr] = cpu_id;
c->nr++;
}
}
/* Trim. */
if (c->nr != perf_cpu_map__nr(cpus)) {
struct cpu_aggr_map *trimmed_c =
realloc(c,
sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);
if (trimmed_c)
c = trimmed_c;
}
/* ensure we process id in increasing order */
if (needs_sort)
qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);
return c;
}
int cpu__get_die_id(struct perf_cpu cpu)
{
int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value);
return ret ?: value;
}
struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
{
struct aggr_cpu_id id;
int die;
die = cpu__get_die_id(cpu);
/* There is no die_id on legacy system. */
if (die == -1)
die = 0;
/*
* die_id is relative to socket, so start
* with the socket ID and then add die to
* make a unique ID.
*/
id = aggr_cpu_id__socket(cpu, data);
if (aggr_cpu_id__is_empty(&id))
return id;
id.die = die;
return id;
}
int cpu__get_core_id(struct perf_cpu cpu)
{
int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value);
return ret ?: value;
}
struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
{
struct aggr_cpu_id id;
int core = cpu__get_core_id(cpu);
/* aggr_cpu_id__die returns a struct with socket and die set. */
id = aggr_cpu_id__die(cpu, data);
if (aggr_cpu_id__is_empty(&id))
return id;
/*
* core_id is relative to socket and die, we need a global id.
* So we combine the result from cpu_map__get_die with the core id
*/
id.core = core;
return id;
}
struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
{
struct aggr_cpu_id id;
/* aggr_cpu_id__core returns a struct with socket, die and core set. */
id = aggr_cpu_id__core(cpu, data);
if (aggr_cpu_id__is_empty(&id))
return id;
id.cpu = cpu;
return id;
}
struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
{
struct aggr_cpu_id id = aggr_cpu_id__empty();
id.node = cpu__get_node(cpu);
return id;
}
struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused)
{
struct aggr_cpu_id id = aggr_cpu_id__empty();
/* it always aggregates to the cpu 0 */
cpu.cpu = 0;
id.cpu = cpu;
return id;
}
/* setup simple routines to easily access node numbers given a cpu number */
static int get_max_num(char *path, int *max)
{
size_t num;
char *buf;
int err = 0;
if (filename__read_str(path, &buf, &num))
return -1;
buf[num] = '\0';
/* start on the right, to find highest node num */
while (--num) {
if ((buf[num] == ',') || (buf[num] == '-')) {
num++;
break;
}
}
if (sscanf(&buf[num], "%d", max) < 1) {
err = -1;
goto out;
}
/* convert from 0-based to 1-based */
(*max)++;
out:
free(buf);
return err;
}
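/*
* For example, a "possible" file containing "0-7\n" is scanned from the
* right until the '-' is found, sscanf() then reads 7 and *max becomes 8,
* i.e. the number of possible entries rather than the highest index.
*/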
/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
const char *mnt;
char path[PATH_MAX];
int ret = -1;
/* set up default */
max_cpu_num.cpu = 4096;
max_present_cpu_num.cpu = 4096;
mnt = sysfs__mountpoint();
if (!mnt)
goto out;
/* get the highest possible cpu number for a sparse allocation */
ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
if (ret >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
goto out;
}
ret = get_max_num(path, &max_cpu_num.cpu);
if (ret)
goto out;
/* get the highest present cpu number for a sparse allocation */
ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
if (ret >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
goto out;
}
ret = get_max_num(path, &max_present_cpu_num.cpu);
out:
if (ret)
pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu);
}
/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
const char *mnt;
char path[PATH_MAX];
int ret = -1;
/* set up default */
max_node_num = 8;
mnt = sysfs__mountpoint();
if (!mnt)
goto out;
/* get the highest possible node number for a sparse allocation */
ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
if (ret >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
goto out;
}
ret = get_max_num(path, &max_node_num);
out:
if (ret)
pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}
int cpu__max_node(void)
{
if (unlikely(!max_node_num))
set_max_node_num();
return max_node_num;
}
struct perf_cpu cpu__max_cpu(void)
{
if (unlikely(!max_cpu_num.cpu))
set_max_cpu_num();
return max_cpu_num;
}
struct perf_cpu cpu__max_present_cpu(void)
{
if (unlikely(!max_present_cpu_num.cpu))
set_max_cpu_num();
return max_present_cpu_num;
}
int cpu__get_node(struct perf_cpu cpu)
{
if (unlikely(cpunode_map == NULL)) {
pr_debug("cpu_map not initialized\n");
return -1;
}
return cpunode_map[cpu.cpu];
}
static int init_cpunode_map(void)
{
int i;
set_max_cpu_num();
set_max_node_num();
cpunode_map = calloc(max_cpu_num.cpu, sizeof(int));
if (!cpunode_map) {
pr_err("%s: calloc failed\n", __func__);
return -1;
}
for (i = 0; i < max_cpu_num.cpu; i++)
cpunode_map[i] = -1;
return 0;
}
int cpu__setup_cpunode_map(void)
{
struct dirent *dent1, *dent2;
DIR *dir1, *dir2;
unsigned int cpu, mem;
char buf[PATH_MAX];
char path[PATH_MAX];
const char *mnt;
int n;
/* initialize globals */
if (init_cpunode_map())
return -1;
mnt = sysfs__mountpoint();
if (!mnt)
return 0;
n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
if (n >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
return -1;
}
dir1 = opendir(path);
if (!dir1)
return 0;
/* walk tree and setup map */
while ((dent1 = readdir(dir1)) != NULL) {
if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
continue;
n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
if (n >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
continue;
}
dir2 = opendir(buf);
if (!dir2)
continue;
while ((dent2 = readdir(dir2)) != NULL) {
if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
continue;
cpunode_map[cpu] = mem;
}
closedir(dir2);
}
closedir(dir1);
return 0;
}
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
int i, start = -1;
bool first = true;
size_t ret = 0;
#define COMMA first ? "" : ","
for (i = 0; i < perf_cpu_map__nr(map) + 1; i++) {
struct perf_cpu cpu = { .cpu = INT_MAX };
bool last = i == perf_cpu_map__nr(map);
if (!last)
cpu = perf_cpu_map__cpu(map, i);
if (start == -1) {
start = i;
if (last) {
ret += snprintf(buf + ret, size - ret,
"%s%d", COMMA,
perf_cpu_map__cpu(map, i).cpu);
}
} else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) {
int end = i - 1;
if (start == end) {
ret += snprintf(buf + ret, size - ret,
"%s%d", COMMA,
perf_cpu_map__cpu(map, start).cpu);
} else {
ret += snprintf(buf + ret, size - ret,
"%s%d-%d", COMMA,
perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu);
}
first = false;
start = i;
}
}
#undef COMMA
pr_debug2("cpumask list: %s\n", buf);
return ret;
}
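/*
* For example, a map holding cpus 0, 1, 2, 4, 5 and 7 is printed as
* "0-2,4-5,7": consecutive cpus are folded into ranges and ranges are
* separated by commas.
*/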
static char hex_char(unsigned char val)
{
if (val < 10)
return val + '0';
if (val < 16)
return val - 10 + 'a';
return '?';
}
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
int i, cpu;
char *ptr = buf;
unsigned char *bitmap;
struct perf_cpu last_cpu = perf_cpu_map__cpu(map, perf_cpu_map__nr(map) - 1);
if (buf == NULL)
return 0;
bitmap = zalloc(last_cpu.cpu / 8 + 1);
if (bitmap == NULL) {
buf[0] = '\0';
return 0;
}
for (i = 0; i < perf_cpu_map__nr(map); i++) {
cpu = perf_cpu_map__cpu(map, i).cpu;
bitmap[cpu / 8] |= 1 << (cpu % 8);
}
for (cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
unsigned char bits = bitmap[cpu / 8];
if (cpu % 8)
bits >>= 4;
else
bits &= 0xf;
*ptr++ = hex_char(bits);
if ((cpu % 32) == 0 && cpu > 0)
*ptr++ = ',';
}
*ptr = '\0';
free(bitmap);
buf[size - 1] = '\0';
return ptr - buf;
}
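/*
* The mask form prints one hex digit per group of four cpus, highest
* cpus first, inserting a ',' every 32 cpus. For example, a map holding
* cpus 0-3 and 32 is printed as "1,0000000f".
*/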
struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
static struct perf_cpu_map *online;
if (!online)
online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
return online;
}
bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
{
return a->thread_idx == b->thread_idx &&
a->node == b->node &&
a->socket == b->socket &&
a->die == b->die &&
a->cache_lvl == b->cache_lvl &&
a->cache == b->cache &&
a->core == b->core &&
a->cpu.cpu == b->cpu.cpu;
}
bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
{
return a->thread_idx == -1 &&
a->node == -1 &&
a->socket == -1 &&
a->die == -1 &&
a->cache_lvl == -1 &&
a->cache == -1 &&
a->core == -1 &&
a->cpu.cpu == -1;
}
struct aggr_cpu_id aggr_cpu_id__empty(void)
{
struct aggr_cpu_id ret = {
.thread_idx = -1,
.node = -1,
.socket = -1,
.die = -1,
.cache_lvl = -1,
.cache = -1,
.core = -1,
.cpu = (struct perf_cpu){ .cpu = -1 },
};
return ret;
}
| linux-master | tools/perf/util/cpumap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(C) 2015-2018 Linaro Limited.
*
* Author: Tor Jeremiassen <[email protected]>
* Author: Mathieu Poirier <[email protected]>
*/
#include <asm/bug.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <opencsd/c_api/opencsd_c_api.h>
#include "cs-etm.h"
#include "cs-etm-decoder.h"
#include "debug.h"
#include "intlist.h"
/* use raw logging */
#ifdef CS_DEBUG_RAW
#define CS_LOG_RAW_FRAMES
#ifdef CS_RAW_PACKED
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
OCSD_DFRMTR_PACKED_RAW_OUT)
#else
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
#endif
#endif
/*
* Assume a maximum of 0.1ns elapsed per instruction. This would be the
* case with a theoretical 10GHz core executing 1 instruction per cycle.
* Used to estimate the sample time for synthesized instructions because
* Coresight only emits a timestamp for a range of instructions rather
* than per instruction.
*/
const u32 INSTR_PER_NS = 10;
struct cs_etm_decoder {
void *data;
void (*packet_printer)(const char *msg);
bool suppress_printing;
dcd_tree_handle_t dcd_tree;
cs_etm_mem_cb_type mem_access;
ocsd_datapath_resp_t prev_return;
const char *decoder_name;
};
static u32
cs_etm_decoder__mem_access(const void *context,
const ocsd_vaddr_t address,
const ocsd_mem_space_acc_t mem_space,
const u8 trace_chan_id,
const u32 req_size,
u8 *buffer)
{
struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
return decoder->mem_access(decoder->data, trace_chan_id, address,
req_size, buffer, mem_space);
}
int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
u64 start, u64 end,
cs_etm_mem_cb_type cb_func)
{
decoder->mem_access = cb_func;
if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end,
OCSD_MEM_SPACE_ANY,
cs_etm_decoder__mem_access,
decoder))
return -1;
return 0;
}
int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
{
ocsd_datapath_resp_t dp_ret;
decoder->prev_return = OCSD_RESP_CONT;
decoder->suppress_printing = true;
dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
0, 0, NULL, NULL);
decoder->suppress_printing = false;
if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
return -1;
return 0;
}
int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
struct cs_etm_packet *packet)
{
if (!packet_queue || !packet)
return -EINVAL;
/* Nothing to do, might as well just return */
if (packet_queue->packet_count == 0)
return 0;
/*
* The queueing process in function cs_etm_decoder__buffer_packet()
* increments the tail *before* using it. This is somewhat
* counter-intuitive but it has the advantage of centralizing tail management
* at a single location. Because of that we need to follow the same
* heuristic with the head, i.e we increment it before using its
* value. Otherwise the first element of the packet queue is not
* used.
*/
packet_queue->head = (packet_queue->head + 1) &
(CS_ETM_PACKET_MAX_BUFFER - 1);
*packet = packet_queue->packet_buffer[packet_queue->head];
packet_queue->packet_count--;
return 1;
}
/*
* Calculate the number of nanoseconds elapsed.
*
* instr_count is updated in place with the remainder of the instructions
* which didn't make up a whole nanosecond.
*/
static u32 cs_etm_decoder__dec_instr_count_to_ns(u32 *instr_count)
{
const u32 instr_copy = *instr_count;
*instr_count %= INSTR_PER_NS;
return instr_copy / INSTR_PER_NS;
}
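/*
* For example, with INSTR_PER_NS == 10 and *instr_count == 25, the
* function returns 2 (whole nanoseconds) and leaves *instr_count == 5
* to be carried over to the next estimate.
*/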
static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
ocsd_etmv3_cfg *config)
{
config->reg_idr = params->etmv3.reg_idr;
config->reg_ctrl = params->etmv3.reg_ctrl;
config->reg_ccer = params->etmv3.reg_ccer;
config->reg_trc_id = params->etmv3.reg_trc_id;
config->arch_ver = ARCH_V7;
config->core_prof = profile_CortexA;
return 0;
}
#define TRCIDR1_TRCARCHMIN_SHIFT 4
#define TRCIDR1_TRCARCHMIN_MASK GENMASK(7, 4)
#define TRCIDR1_TRCARCHMIN(x) (((x) & TRCIDR1_TRCARCHMIN_MASK) >> TRCIDR1_TRCARCHMIN_SHIFT)
static enum _ocsd_arch_version cs_etm_decoder__get_etmv4_arch_ver(u32 reg_idr1)
{
/*
* For ETMv4 if the trace minor version is 4 or more then we can assume
* the architecture is ARCH_AA64 rather than just V8.
* ARCH_V8 = V8 architecture
* ARCH_AA64 = Min v8r3 plus additional AA64 PE features
*/
return TRCIDR1_TRCARCHMIN(reg_idr1) >= 4 ? ARCH_AA64 : ARCH_V8;
}
static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
ocsd_etmv4_cfg *config)
{
config->reg_configr = params->etmv4.reg_configr;
config->reg_traceidr = params->etmv4.reg_traceidr;
config->reg_idr0 = params->etmv4.reg_idr0;
config->reg_idr1 = params->etmv4.reg_idr1;
config->reg_idr2 = params->etmv4.reg_idr2;
config->reg_idr8 = params->etmv4.reg_idr8;
config->reg_idr9 = 0;
config->reg_idr10 = 0;
config->reg_idr11 = 0;
config->reg_idr12 = 0;
config->reg_idr13 = 0;
config->arch_ver = cs_etm_decoder__get_etmv4_arch_ver(params->etmv4.reg_idr1);
config->core_prof = profile_CortexA;
}
static void cs_etm_decoder__gen_ete_config(struct cs_etm_trace_params *params,
ocsd_ete_cfg *config)
{
config->reg_configr = params->ete.reg_configr;
config->reg_traceidr = params->ete.reg_traceidr;
config->reg_idr0 = params->ete.reg_idr0;
config->reg_idr1 = params->ete.reg_idr1;
config->reg_idr2 = params->ete.reg_idr2;
config->reg_idr8 = params->ete.reg_idr8;
config->reg_devarch = params->ete.reg_devarch;
config->arch_ver = ARCH_AA64;
config->core_prof = profile_CortexA;
}
static void cs_etm_decoder__print_str_cb(const void *p_context,
const char *msg,
const int str_len)
{
const struct cs_etm_decoder *decoder = p_context;
if (p_context && str_len && !decoder->suppress_printing)
decoder->packet_printer(msg);
}
static int
cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
struct cs_etm_decoder *decoder)
{
int ret = 0;
if (d_params->packet_printer == NULL)
return -1;
decoder->packet_printer = d_params->packet_printer;
/*
* Set up a library default logger to process any printers
* (packet/raw frame) we add later.
*/
ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
if (ret != 0)
return -1;
/* no stdout / err / file output */
ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
if (ret != 0)
return -1;
/*
* Set the string CB for the default logger, passes strings to
* perf print logger.
*/
ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
(void *)decoder,
cs_etm_decoder__print_str_cb);
if (ret != 0)
ret = -1;
return 0;
}
#ifdef CS_LOG_RAW_FRAMES
static void
cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
struct cs_etm_decoder *decoder)
{
/* Only log these during a --dump operation */
if (d_params->operation == CS_ETM_OPERATION_PRINT) {
/* set up a library default logger to process the
* raw frame printer we add later
*/
ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
/* no stdout / err / file output */
ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
/* set the string CB for the default logger,
* passes strings to perf print logger.
*/
ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
(void *)decoder,
cs_etm_decoder__print_str_cb);
/* use the built in library printer for the raw frames */
ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
CS_RAW_DEBUG_FLAGS);
}
}
#else
static void
cs_etm_decoder__init_raw_frame_logging(
struct cs_etm_decoder_params *d_params __maybe_unused,
struct cs_etm_decoder *decoder __maybe_unused)
{
}
#endif
static ocsd_datapath_resp_t
cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
struct cs_etm_packet_queue *packet_queue,
const uint8_t trace_chan_id)
{
u64 estimated_ts;
/* No timestamp packet has been received, nothing to do */
if (!packet_queue->next_cs_timestamp)
return OCSD_RESP_CONT;
estimated_ts = packet_queue->cs_timestamp +
cs_etm_decoder__dec_instr_count_to_ns(&packet_queue->instr_count);
/* Estimated TS can never be higher than the next real one in the trace */
packet_queue->cs_timestamp = min(packet_queue->next_cs_timestamp, estimated_ts);
/* Tell the front end which traceid_queue needs attention */
cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);
return OCSD_RESP_WAIT;
}
static ocsd_datapath_resp_t
cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id,
const ocsd_trc_index_t indx)
{
struct cs_etm_packet_queue *packet_queue;
u64 converted_timestamp;
u64 estimated_first_ts;
/* First get the packet queue for this traceID */
packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
if (!packet_queue)
return OCSD_RESP_FATAL_SYS_ERR;
/*
* Coresight timestamps are raw timer values which need to be scaled to ns. Assume
* 0 is a bad value so don't try to convert it.
*/
converted_timestamp = elem->timestamp ?
cs_etm__convert_sample_time(etmq, elem->timestamp) : 0;
/*
* We've seen a timestamp packet before - simply record the new value.
* Function do_soft_timestamp() will report the value to the front end,
* hence asking the decoder to keep decoding rather than stopping.
*/
if (packet_queue->next_cs_timestamp) {
/*
* What was next is now where new ranges start from, overwriting
* any previous estimate in cs_timestamp
*/
packet_queue->cs_timestamp = packet_queue->next_cs_timestamp;
packet_queue->next_cs_timestamp = converted_timestamp;
return OCSD_RESP_CONT;
}
if (!converted_timestamp) {
/*
* Zero timestamps can be seen due to misconfiguration or hardware bugs.
* Warn once, and don't try to subtract instr_count as it would result in an
* underflow.
*/
packet_queue->cs_timestamp = 0;
if (!cs_etm__etmq_is_timeless(etmq))
pr_warning_once("Zero Coresight timestamp found at Idx:%" OCSD_TRC_IDX_STR
". Decoding may be improved by prepending 'Z' to your current --itrace arguments.\n",
indx);
} else if (packet_queue->instr_count / INSTR_PER_NS > converted_timestamp) {
/*
* Sanity check that the elem->timestamp - packet_queue->instr_count would not
* result in an underflow. Warn and clamp at 0 if it would.
*/
packet_queue->cs_timestamp = 0;
pr_err("Timestamp calculation underflow at Idx:%" OCSD_TRC_IDX_STR "\n", indx);
} else {
/*
* This is the first timestamp we've seen since the beginning of traces
* or a discontinuity. Since timestamps packets are generated *after*
* range packets have been generated, we need to estimate the time at
* which instructions started by subtracting the number of instructions
* executed from the timestamp. Don't estimate earlier than the last used
* timestamp though.
*/
estimated_first_ts = converted_timestamp -
(packet_queue->instr_count / INSTR_PER_NS);
packet_queue->cs_timestamp = max(packet_queue->cs_timestamp, estimated_first_ts);
}
packet_queue->next_cs_timestamp = converted_timestamp;
packet_queue->instr_count = 0;
/* Tell the front end which traceid_queue needs attention */
cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);
/* Halt processing until we are being told to proceed */
return OCSD_RESP_WAIT;
}
static void
cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
{
packet_queue->next_cs_timestamp = 0;
packet_queue->instr_count = 0;
}
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_packet(struct cs_etm_packet_queue *packet_queue,
const u8 trace_chan_id,
enum cs_etm_sample_type sample_type)
{
u32 et = 0;
int cpu;
if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
return OCSD_RESP_FATAL_SYS_ERR;
if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
return OCSD_RESP_FATAL_SYS_ERR;
et = packet_queue->tail;
et = (et + 1) & (CS_ETM_PACKET_MAX_BUFFER - 1);
packet_queue->tail = et;
packet_queue->packet_count++;
packet_queue->packet_buffer[et].sample_type = sample_type;
packet_queue->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
packet_queue->packet_buffer[et].cpu = cpu;
packet_queue->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
packet_queue->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
packet_queue->packet_buffer[et].instr_count = 0;
packet_queue->packet_buffer[et].last_instr_taken_branch = false;
packet_queue->packet_buffer[et].last_instr_size = 0;
packet_queue->packet_buffer[et].last_instr_type = 0;
packet_queue->packet_buffer[et].last_instr_subtype = 0;
packet_queue->packet_buffer[et].last_instr_cond = 0;
packet_queue->packet_buffer[et].flags = 0;
packet_queue->packet_buffer[et].exception_number = UINT32_MAX;
packet_queue->packet_buffer[et].trace_chan_id = trace_chan_id;
if (packet_queue->packet_count == CS_ETM_PACKET_MAX_BUFFER - 1)
return OCSD_RESP_WAIT;
return OCSD_RESP_CONT;
}
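/*
* Note: the '& (CS_ETM_PACKET_MAX_BUFFER - 1)' wrap assumes the buffer
* size is a power of two. OCSD_RESP_WAIT is returned once the queue
* reaches its last free slot, so the decoder pauses before the packet
* buffer can overflow.
*/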
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
struct cs_etm_packet_queue *packet_queue,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{
int ret = 0;
struct cs_etm_packet *packet;
ret = cs_etm_decoder__buffer_packet(packet_queue, trace_chan_id,
CS_ETM_RANGE);
if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
return ret;
packet = &packet_queue->packet_buffer[packet_queue->tail];
switch (elem->isa) {
case ocsd_isa_aarch64:
packet->isa = CS_ETM_ISA_A64;
break;
case ocsd_isa_arm:
packet->isa = CS_ETM_ISA_A32;
break;
case ocsd_isa_thumb2:
packet->isa = CS_ETM_ISA_T32;
break;
case ocsd_isa_tee:
case ocsd_isa_jazelle:
case ocsd_isa_custom:
case ocsd_isa_unknown:
default:
packet->isa = CS_ETM_ISA_UNKNOWN;
}
packet->start_addr = elem->st_addr;
packet->end_addr = elem->en_addr;
packet->instr_count = elem->num_instr_range;
packet->last_instr_type = elem->last_i_type;
packet->last_instr_subtype = elem->last_i_subtype;
packet->last_instr_cond = elem->last_instr_cond;
if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
packet->last_instr_taken_branch = elem->last_instr_exec;
else
packet->last_instr_taken_branch = false;
packet->last_instr_size = elem->last_instr_sz;
/* per-thread scenario, no need to generate a timestamp */
if (cs_etm__etmq_is_timeless(etmq))
goto out;
/*
* The packet queue is full and we haven't seen a timestamp (had we
* seen one the packet queue wouldn't be full). Let the front end
* deal with it.
*/
if (ret == OCSD_RESP_WAIT)
goto out;
packet_queue->instr_count += elem->num_instr_range;
/* Tell the front end we have a new timestamp to process */
ret = cs_etm_decoder__do_soft_timestamp(etmq, packet_queue,
trace_chan_id);
out:
return ret;
}
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
const uint8_t trace_chan_id)
{
/*
* Something happened and who knows when we'll get new traces so
* reset time statistics.
*/
cs_etm_decoder__reset_timestamp(queue);
return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
CS_ETM_DISCONTINUITY);
}
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{
int ret = 0;
struct cs_etm_packet *packet;
ret = cs_etm_decoder__buffer_packet(queue, trace_chan_id,
CS_ETM_EXCEPTION);
if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
return ret;
packet = &queue->packet_buffer[queue->tail];
packet->exception_number = elem->exception_number;
return ret;
}
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception_ret(struct cs_etm_packet_queue *queue,
const uint8_t trace_chan_id)
{
return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
CS_ETM_EXCEPTION_RET);
}
static ocsd_datapath_resp_t
cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
struct cs_etm_packet_queue *packet_queue,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{
pid_t tid = -1;
/*
* Process the PE_CONTEXT packets if we have a valid contextID or VMID.
* If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
* as VMID, Bit ETM_OPT_CTXTID2 is set in this case.
*/
switch (cs_etm__get_pid_fmt(etmq)) {
case CS_ETM_PIDFMT_CTXTID:
if (elem->context.ctxt_id_valid)
tid = elem->context.context_id;
break;
case CS_ETM_PIDFMT_CTXTID2:
if (elem->context.vmid_valid)
tid = elem->context.vmid;
break;
case CS_ETM_PIDFMT_NONE:
default:
break;
}
if (cs_etm__etmq_set_tid_el(etmq, tid, trace_chan_id,
elem->context.exception_level))
return OCSD_RESP_FATAL_SYS_ERR;
if (tid == -1)
return OCSD_RESP_CONT;
/*
* A timestamp is generated after a PE_CONTEXT element so make sure
* to rely on that coming one.
*/
cs_etm_decoder__reset_timestamp(packet_queue);
return OCSD_RESP_CONT;
}
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
const void *context,
const ocsd_trc_index_t indx,
const u8 trace_chan_id __maybe_unused,
const ocsd_generic_trace_elem *elem)
{
ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
struct cs_etm_queue *etmq = decoder->data;
struct cs_etm_packet_queue *packet_queue;
/* First get the packet queue for this traceID */
packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
if (!packet_queue)
return OCSD_RESP_FATAL_SYS_ERR;
switch (elem->elem_type) {
case OCSD_GEN_TRC_ELEM_UNKNOWN:
break;
case OCSD_GEN_TRC_ELEM_EO_TRACE:
case OCSD_GEN_TRC_ELEM_NO_SYNC:
case OCSD_GEN_TRC_ELEM_TRACE_ON:
resp = cs_etm_decoder__buffer_discontinuity(packet_queue,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION:
resp = cs_etm_decoder__buffer_exception(packet_queue, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
resp = cs_etm_decoder__buffer_exception_ret(packet_queue,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_TIMESTAMP:
resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
trace_chan_id,
indx);
break;
case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
resp = cs_etm_decoder__set_tid(etmq, packet_queue,
elem, trace_chan_id);
break;
/* Unused packet types */
case OCSD_GEN_TRC_ELEM_I_RANGE_NOPATH:
case OCSD_GEN_TRC_ELEM_ADDR_NACC:
case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
case OCSD_GEN_TRC_ELEM_EVENT:
case OCSD_GEN_TRC_ELEM_SWTRACE:
case OCSD_GEN_TRC_ELEM_CUSTOM:
case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
case OCSD_GEN_TRC_ELEM_MEMTRANS:
#if (OCSD_VER_NUM >= 0x010400)
case OCSD_GEN_TRC_ELEM_INSTRUMENTATION:
#endif
default:
break;
}
return resp;
}
static int
cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
struct cs_etm_trace_params *t_params,
struct cs_etm_decoder *decoder)
{
ocsd_etmv3_cfg config_etmv3;
ocsd_etmv4_cfg trace_config_etmv4;
ocsd_ete_cfg trace_config_ete;
void *trace_config;
u8 csid;
switch (t_params->protocol) {
case CS_ETM_PROTO_ETMV3:
case CS_ETM_PROTO_PTM:
csid = (t_params->etmv3.reg_idr & CORESIGHT_TRACE_ID_VAL_MASK);
cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
decoder->decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
OCSD_BUILTIN_DCD_ETMV3 :
OCSD_BUILTIN_DCD_PTM;
trace_config = &config_etmv3;
break;
case CS_ETM_PROTO_ETMV4i:
csid = (t_params->etmv4.reg_traceidr & CORESIGHT_TRACE_ID_VAL_MASK);
cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
decoder->decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
trace_config = &trace_config_etmv4;
break;
case CS_ETM_PROTO_ETE:
csid = (t_params->ete.reg_traceidr & CORESIGHT_TRACE_ID_VAL_MASK);
cs_etm_decoder__gen_ete_config(t_params, &trace_config_ete);
decoder->decoder_name = OCSD_BUILTIN_DCD_ETE;
trace_config = &trace_config_ete;
break;
default:
return -1;
}
/* if the CPU has no trace ID associated, no decoder needed */
if (csid == CORESIGHT_TRACE_ID_UNUSED_VAL)
return 0;
if (d_params->operation == CS_ETM_OPERATION_DECODE) {
if (ocsd_dt_create_decoder(decoder->dcd_tree,
decoder->decoder_name,
OCSD_CREATE_FLG_FULL_DECODER,
trace_config, &csid))
return -1;
if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
cs_etm_decoder__gen_trace_elem_printer,
decoder))
return -1;
return 0;
} else if (d_params->operation == CS_ETM_OPERATION_PRINT) {
if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder->decoder_name,
OCSD_CREATE_FLG_PACKET_PROC,
trace_config, &csid))
return -1;
if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
return -1;
return 0;
}
return -1;
}
struct cs_etm_decoder *
cs_etm_decoder__new(int decoders, struct cs_etm_decoder_params *d_params,
struct cs_etm_trace_params t_params[])
{
struct cs_etm_decoder *decoder;
ocsd_dcd_tree_src_t format;
u32 flags;
int i, ret;
if ((!t_params) || (!d_params))
return NULL;
decoder = zalloc(sizeof(*decoder));
if (!decoder)
return NULL;
decoder->data = d_params->data;
decoder->prev_return = OCSD_RESP_CONT;
format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
OCSD_TRC_SRC_SINGLE);
flags = 0;
flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);
/*
* Drivers may add barrier frames when used with perf, set up to
* handle this. Barriers consist of a FSYNC packet repeated 4 times.
*/
flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;
/* Create decode tree for the data source */
decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);
if (decoder->dcd_tree == 0)
goto err_free_decoder;
/* init library print logging support */
ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
if (ret != 0)
goto err_free_decoder;
/* init raw frame logging if required */
cs_etm_decoder__init_raw_frame_logging(d_params, decoder);
for (i = 0; i < decoders; i++) {
ret = cs_etm_decoder__create_etm_decoder(d_params,
&t_params[i],
decoder);
if (ret != 0)
goto err_free_decoder;
}
return decoder;
err_free_decoder:
cs_etm_decoder__free(decoder);
return NULL;
}
int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
u64 indx, const u8 *buf,
size_t len, size_t *consumed)
{
int ret = 0;
ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
ocsd_datapath_resp_t prev_return = decoder->prev_return;
size_t processed = 0;
u32 count;
while (processed < len) {
if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
cur = ocsd_dt_process_data(decoder->dcd_tree,
OCSD_OP_FLUSH,
0,
0,
NULL,
NULL);
} else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
cur = ocsd_dt_process_data(decoder->dcd_tree,
OCSD_OP_DATA,
indx + processed,
len - processed,
&buf[processed],
&count);
processed += count;
} else {
ret = -EINVAL;
break;
}
/*
* Return to the input code if the packet buffer is full.
* Flushing will get done once the packet buffer has been
* processed.
*/
if (OCSD_DATA_RESP_IS_WAIT(cur))
break;
prev_return = cur;
}
decoder->prev_return = cur;
*consumed = processed;
return ret;
}
void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
{
if (!decoder)
return;
ocsd_destroy_dcd_tree(decoder->dcd_tree);
decoder->dcd_tree = NULL;
free(decoder);
}
const char *cs_etm_decoder__get_name(struct cs_etm_decoder *decoder)
{
return decoder->decoder_name;
}
| linux-master | tools/perf/util/cs-etm-decoder/cs-etm-decoder.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Arm Statistical Profiling Extensions (SPE) support
* Copyright (c) 2017-2018, Arm Ltd.
*/
#include <stdio.h>
#include <string.h>
#include <endian.h>
#include <byteswap.h>
#include <linux/bitops.h>
#include <stdarg.h>
#include "arm-spe-pkt-decoder.h"
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define le16_to_cpu bswap_16
#define le32_to_cpu bswap_32
#define le64_to_cpu bswap_64
#define memcpy_le64(d, s, n) do { \
memcpy((d), (s), (n)); \
*(d) = le64_to_cpu(*(d)); \
} while (0)
#else
#define le16_to_cpu
#define le32_to_cpu
#define le64_to_cpu
#define memcpy_le64 memcpy
#endif
static const char * const arm_spe_packet_name[] = {
[ARM_SPE_PAD] = "PAD",
[ARM_SPE_END] = "END",
[ARM_SPE_TIMESTAMP] = "TS",
[ARM_SPE_ADDRESS] = "ADDR",
[ARM_SPE_COUNTER] = "LAT",
[ARM_SPE_CONTEXT] = "CONTEXT",
[ARM_SPE_OP_TYPE] = "OP-TYPE",
[ARM_SPE_EVENTS] = "EVENTS",
[ARM_SPE_DATA_SOURCE] = "DATA-SOURCE",
};
const char *arm_spe_pkt_name(enum arm_spe_pkt_type type)
{
return arm_spe_packet_name[type];
}
/*
* Extracts the field "sz" from header bits and converts to bytes:
* 00 : byte (1)
* 01 : halfword (2)
* 10 : word (4)
* 11 : doubleword (8)
*/
static unsigned int arm_spe_payload_len(unsigned char hdr)
{
return 1U << ((hdr & GENMASK_ULL(5, 4)) >> 4);
}
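/*
* For example, a header byte of 0x20 has sz bits [5:4] == 0b10, so the
* payload that follows is a 4-byte word.
*/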
static int arm_spe_get_payload(const unsigned char *buf, size_t len,
unsigned char ext_hdr,
struct arm_spe_pkt *packet)
{
size_t payload_len = arm_spe_payload_len(buf[ext_hdr]);
if (len < 1 + ext_hdr + payload_len)
return ARM_SPE_NEED_MORE_BYTES;
buf += 1 + ext_hdr;
switch (payload_len) {
case 1: packet->payload = *(uint8_t *)buf; break;
case 2: packet->payload = le16_to_cpu(*(uint16_t *)buf); break;
case 4: packet->payload = le32_to_cpu(*(uint32_t *)buf); break;
case 8: packet->payload = le64_to_cpu(*(uint64_t *)buf); break;
default: return ARM_SPE_BAD_PACKET;
}
return 1 + ext_hdr + payload_len;
}
static int arm_spe_get_pad(struct arm_spe_pkt *packet)
{
packet->type = ARM_SPE_PAD;
return 1;
}
static int arm_spe_get_alignment(const unsigned char *buf, size_t len,
struct arm_spe_pkt *packet)
{
unsigned int alignment = 1 << ((buf[0] & 0xf) + 1);
if (len < alignment)
return ARM_SPE_NEED_MORE_BYTES;
packet->type = ARM_SPE_PAD;
return alignment - (((uintptr_t)buf) & (alignment - 1));
}
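/*
* For example, if (buf[0] & 0xf) == 3 the alignment is 1 << 4 == 16
* bytes, and the return value is the number of bytes to skip so that
* decoding resumes on the next 16-byte boundary of the buffer.
*/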
static int arm_spe_get_end(struct arm_spe_pkt *packet)
{
packet->type = ARM_SPE_END;
return 1;
}
static int arm_spe_get_timestamp(const unsigned char *buf, size_t len,
struct arm_spe_pkt *packet)
{
packet->type = ARM_SPE_TIMESTAMP;
return arm_spe_get_payload(buf, len, 0, packet);
}
static int arm_spe_get_events(const unsigned char *buf, size_t len,
struct arm_spe_pkt *packet)
{
packet->type = ARM_SPE_EVENTS;
/* We use the index to identify Events with a smaller number of
* comparisons in arm_spe_pkt_desc(): e.g., the LLC-ACCESS,
* LLC-REFILL, and REMOTE-ACCESS events are identified if
* index > 1.
*/
packet->index = arm_spe_payload_len(buf[0]);
return arm_spe_get_payload(buf, len, 0, packet);
}
static int arm_spe_get_data_source(const unsigned char *buf, size_t len,
struct arm_spe_pkt *packet)
{
packet->type = ARM_SPE_DATA_SOURCE;
return arm_spe_get_payload(buf, len, 0, packet);
}
static int arm_spe_get_context(const unsigned char *buf, size_t len,
struct arm_spe_pkt *packet)
{
packet->type = ARM_SPE_CONTEXT;
packet->index = SPE_CTX_PKT_HDR_INDEX(buf[0]);
return arm_spe_get_payload(buf, len, 0, packet);
}
static int arm_spe_get_op_type(const unsigned char *buf, size_t len,
struct arm_spe_pkt *packet)
{
packet->type = ARM_SPE_OP_TYPE;
packet->index = SPE_OP_PKT_HDR_CLASS(buf[0]);
return arm_spe_get_payload(buf, len, 0, packet);
}
static int arm_spe_get_counter(const unsigned char *buf, size_t len,
const unsigned char ext_hdr, struct arm_spe_pkt *packet)
{
packet->type = ARM_SPE_COUNTER;
if (ext_hdr)
packet->index = SPE_HDR_EXTENDED_INDEX(buf[0], buf[1]);
else
packet->index = SPE_HDR_SHORT_INDEX(buf[0]);
return arm_spe_get_payload(buf, len, ext_hdr, packet);
}
static int arm_spe_get_addr(const unsigned char *buf, size_t len,
const unsigned char ext_hdr, struct arm_spe_pkt *packet)
{
packet->type = ARM_SPE_ADDRESS;
if (ext_hdr)
packet->index = SPE_HDR_EXTENDED_INDEX(buf[0], buf[1]);
else
packet->index = SPE_HDR_SHORT_INDEX(buf[0]);
return arm_spe_get_payload(buf, len, ext_hdr, packet);
}
static int arm_spe_do_get_packet(const unsigned char *buf, size_t len,
struct arm_spe_pkt *packet)
{
unsigned int hdr;
unsigned char ext_hdr = 0;
memset(packet, 0, sizeof(struct arm_spe_pkt));
if (!len)
return ARM_SPE_NEED_MORE_BYTES;
hdr = buf[0];
if (hdr == SPE_HEADER0_PAD)
return arm_spe_get_pad(packet);
if (hdr == SPE_HEADER0_END) /* no timestamp at end of record */
return arm_spe_get_end(packet);
if (hdr == SPE_HEADER0_TIMESTAMP)
return arm_spe_get_timestamp(buf, len, packet);
if ((hdr & SPE_HEADER0_MASK1) == SPE_HEADER0_EVENTS)
return arm_spe_get_events(buf, len, packet);
if ((hdr & SPE_HEADER0_MASK1) == SPE_HEADER0_SOURCE)
return arm_spe_get_data_source(buf, len, packet);
if ((hdr & SPE_HEADER0_MASK2) == SPE_HEADER0_CONTEXT)
return arm_spe_get_context(buf, len, packet);
if ((hdr & SPE_HEADER0_MASK2) == SPE_HEADER0_OP_TYPE)
return arm_spe_get_op_type(buf, len, packet);
if ((hdr & SPE_HEADER0_MASK2) == SPE_HEADER0_EXTENDED) {
/* 16-bit extended format header */
if (len == 1)
return ARM_SPE_BAD_PACKET;
ext_hdr = 1;
hdr = buf[1];
if (hdr == SPE_HEADER1_ALIGNMENT)
return arm_spe_get_alignment(buf, len, packet);
}
/*
* The short format header's byte 0 or the extended format header's
* byte 1 has been assigned to 'hdr', which uses the same encoding for
* address and counter packets, so we don't need to distinguish between
* the short and extended formats and can handle both at once.
*/
if ((hdr & SPE_HEADER0_MASK3) == SPE_HEADER0_ADDRESS)
return arm_spe_get_addr(buf, len, ext_hdr, packet);
if ((hdr & SPE_HEADER0_MASK3) == SPE_HEADER0_COUNTER)
return arm_spe_get_counter(buf, len, ext_hdr, packet);
return ARM_SPE_BAD_PACKET;
}
int arm_spe_get_packet(const unsigned char *buf, size_t len,
struct arm_spe_pkt *packet)
{
int ret;
ret = arm_spe_do_get_packet(buf, len, packet);
/* put multiple consecutive PADs on the same line, up to
* the fixed-width output format of 16 bytes per line.
*/
if (ret > 0 && packet->type == ARM_SPE_PAD) {
while (ret < 16 && len > (size_t)ret && !buf[ret])
ret += 1;
}
return ret;
}
static int arm_spe_pkt_out_string(int *err, char **buf_p, size_t *blen,
const char *fmt, ...)
{
va_list ap;
int ret;
/* Bail out if any error occurred */
if (err && *err)
return *err;
va_start(ap, fmt);
ret = vsnprintf(*buf_p, *blen, fmt, ap);
va_end(ap);
if (ret < 0) {
if (err && !*err)
*err = ret;
/*
* A return value of *blen or more means that the output was
* truncated and the buffer is overrun.
*/
} else if ((size_t)ret >= *blen) {
(*buf_p)[*blen - 1] = '\0';
/*
* Set *err to 'ret' to avoid overflow if a later call
* tries to fill this buffer sequentially.
*/
if (err && !*err)
*err = ret;
} else {
*buf_p += ret;
*blen -= ret;
}
return ret;
}
static int arm_spe_pkt_desc_event(const struct arm_spe_pkt *packet,
char *buf, size_t buf_len)
{
u64 payload = packet->payload;
int err = 0;
arm_spe_pkt_out_string(&err, &buf, &buf_len, "EV");
if (payload & BIT(EV_EXCEPTION_GEN))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " EXCEPTION-GEN");
if (payload & BIT(EV_RETIRED))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " RETIRED");
if (payload & BIT(EV_L1D_ACCESS))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " L1D-ACCESS");
if (payload & BIT(EV_L1D_REFILL))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " L1D-REFILL");
if (payload & BIT(EV_TLB_ACCESS))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " TLB-ACCESS");
if (payload & BIT(EV_TLB_WALK))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " TLB-REFILL");
if (payload & BIT(EV_NOT_TAKEN))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " NOT-TAKEN");
if (payload & BIT(EV_MISPRED))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " MISPRED");
if (payload & BIT(EV_LLC_ACCESS))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " LLC-ACCESS");
if (payload & BIT(EV_LLC_MISS))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " LLC-REFILL");
if (payload & BIT(EV_REMOTE_ACCESS))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " REMOTE-ACCESS");
if (payload & BIT(EV_ALIGNMENT))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " ALIGNMENT");
if (payload & BIT(EV_PARTIAL_PREDICATE))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " SVE-PARTIAL-PRED");
if (payload & BIT(EV_EMPTY_PREDICATE))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " SVE-EMPTY-PRED");
return err;
}
static int arm_spe_pkt_desc_op_type(const struct arm_spe_pkt *packet,
char *buf, size_t buf_len)
{
u64 payload = packet->payload;
int err = 0;
switch (packet->index) {
case SPE_OP_PKT_HDR_CLASS_OTHER:
if (SPE_OP_PKT_IS_OTHER_SVE_OP(payload)) {
arm_spe_pkt_out_string(&err, &buf, &buf_len, "SVE-OTHER");
/* SVE effective vector length */
arm_spe_pkt_out_string(&err, &buf, &buf_len, " EVLEN %d",
SPE_OP_PKG_SVE_EVL(payload));
if (payload & SPE_OP_PKT_SVE_FP)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " FP");
if (payload & SPE_OP_PKT_SVE_PRED)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " PRED");
} else {
arm_spe_pkt_out_string(&err, &buf, &buf_len, "OTHER");
arm_spe_pkt_out_string(&err, &buf, &buf_len, " %s",
payload & SPE_OP_PKT_COND ?
"COND-SELECT" : "INSN-OTHER");
}
break;
case SPE_OP_PKT_HDR_CLASS_LD_ST_ATOMIC:
arm_spe_pkt_out_string(&err, &buf, &buf_len,
payload & 0x1 ? "ST" : "LD");
if (SPE_OP_PKT_IS_LDST_ATOMIC(payload)) {
if (payload & SPE_OP_PKT_AT)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " AT");
if (payload & SPE_OP_PKT_EXCL)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " EXCL");
if (payload & SPE_OP_PKT_AR)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " AR");
}
switch (SPE_OP_PKT_LDST_SUBCLASS_GET(payload)) {
case SPE_OP_PKT_LDST_SUBCLASS_SIMD_FP:
arm_spe_pkt_out_string(&err, &buf, &buf_len, " SIMD-FP");
break;
case SPE_OP_PKT_LDST_SUBCLASS_GP_REG:
arm_spe_pkt_out_string(&err, &buf, &buf_len, " GP-REG");
break;
case SPE_OP_PKT_LDST_SUBCLASS_UNSPEC_REG:
arm_spe_pkt_out_string(&err, &buf, &buf_len, " UNSPEC-REG");
break;
case SPE_OP_PKT_LDST_SUBCLASS_NV_SYSREG:
arm_spe_pkt_out_string(&err, &buf, &buf_len, " NV-SYSREG");
break;
case SPE_OP_PKT_LDST_SUBCLASS_MTE_TAG:
arm_spe_pkt_out_string(&err, &buf, &buf_len, " MTE-TAG");
break;
case SPE_OP_PKT_LDST_SUBCLASS_MEMCPY:
arm_spe_pkt_out_string(&err, &buf, &buf_len, " MEMCPY");
break;
case SPE_OP_PKT_LDST_SUBCLASS_MEMSET:
arm_spe_pkt_out_string(&err, &buf, &buf_len, " MEMSET");
break;
default:
break;
}
if (SPE_OP_PKT_IS_LDST_SVE(payload)) {
/* SVE effective vector length */
arm_spe_pkt_out_string(&err, &buf, &buf_len, " EVLEN %d",
SPE_OP_PKG_SVE_EVL(payload));
if (payload & SPE_OP_PKT_SVE_PRED)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " PRED");
if (payload & SPE_OP_PKT_SVE_SG)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " SG");
}
break;
case SPE_OP_PKT_HDR_CLASS_BR_ERET:
arm_spe_pkt_out_string(&err, &buf, &buf_len, "B");
if (payload & SPE_OP_PKT_COND)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " COND");
if (SPE_OP_PKT_IS_INDIRECT_BRANCH(payload))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " IND");
break;
default:
/* Unknown index */
err = -1;
break;
}
return err;
}
static int arm_spe_pkt_desc_addr(const struct arm_spe_pkt *packet,
char *buf, size_t buf_len)
{
int ns, el, idx = packet->index;
int ch, pat;
u64 payload = packet->payload;
int err = 0;
static const char *idx_name[] = {"PC", "TGT", "VA", "PA", "PBT"};
switch (idx) {
case SPE_ADDR_PKT_HDR_INDEX_INS:
case SPE_ADDR_PKT_HDR_INDEX_BRANCH:
case SPE_ADDR_PKT_HDR_INDEX_PREV_BRANCH:
ns = !!SPE_ADDR_PKT_GET_NS(payload);
el = SPE_ADDR_PKT_GET_EL(payload);
payload = SPE_ADDR_PKT_ADDR_GET_BYTES_0_6(payload);
arm_spe_pkt_out_string(&err, &buf, &buf_len,
"%s 0x%llx el%d ns=%d",
idx_name[idx], payload, el, ns);
break;
case SPE_ADDR_PKT_HDR_INDEX_DATA_VIRT:
arm_spe_pkt_out_string(&err, &buf, &buf_len,
"VA 0x%llx", payload);
break;
case SPE_ADDR_PKT_HDR_INDEX_DATA_PHYS:
ns = !!SPE_ADDR_PKT_GET_NS(payload);
ch = !!SPE_ADDR_PKT_GET_CH(payload);
pat = SPE_ADDR_PKT_GET_PAT(payload);
payload = SPE_ADDR_PKT_ADDR_GET_BYTES_0_6(payload);
arm_spe_pkt_out_string(&err, &buf, &buf_len,
"PA 0x%llx ns=%d ch=%d pat=%x",
payload, ns, ch, pat);
break;
default:
/* Unknown index */
err = -1;
break;
}
return err;
}
static int arm_spe_pkt_desc_counter(const struct arm_spe_pkt *packet,
char *buf, size_t buf_len)
{
u64 payload = packet->payload;
const char *name = arm_spe_pkt_name(packet->type);
int err = 0;
arm_spe_pkt_out_string(&err, &buf, &buf_len, "%s %d ", name,
(unsigned short)payload);
switch (packet->index) {
case SPE_CNT_PKT_HDR_INDEX_TOTAL_LAT:
arm_spe_pkt_out_string(&err, &buf, &buf_len, "TOT");
break;
case SPE_CNT_PKT_HDR_INDEX_ISSUE_LAT:
arm_spe_pkt_out_string(&err, &buf, &buf_len, "ISSUE");
break;
case SPE_CNT_PKT_HDR_INDEX_TRANS_LAT:
arm_spe_pkt_out_string(&err, &buf, &buf_len, "XLAT");
break;
default:
break;
}
return err;
}
int arm_spe_pkt_desc(const struct arm_spe_pkt *packet, char *buf,
size_t buf_len)
{
int idx = packet->index;
unsigned long long payload = packet->payload;
const char *name = arm_spe_pkt_name(packet->type);
char *buf_orig = buf;
size_t blen = buf_len;
int err = 0;
switch (packet->type) {
case ARM_SPE_BAD:
case ARM_SPE_PAD:
case ARM_SPE_END:
arm_spe_pkt_out_string(&err, &buf, &blen, "%s", name);
break;
case ARM_SPE_EVENTS:
err = arm_spe_pkt_desc_event(packet, buf, buf_len);
break;
case ARM_SPE_OP_TYPE:
err = arm_spe_pkt_desc_op_type(packet, buf, buf_len);
break;
case ARM_SPE_DATA_SOURCE:
case ARM_SPE_TIMESTAMP:
arm_spe_pkt_out_string(&err, &buf, &blen, "%s %lld", name, payload);
break;
case ARM_SPE_ADDRESS:
err = arm_spe_pkt_desc_addr(packet, buf, buf_len);
break;
case ARM_SPE_CONTEXT:
arm_spe_pkt_out_string(&err, &buf, &blen, "%s 0x%lx el%d",
name, (unsigned long)payload, idx + 1);
break;
case ARM_SPE_COUNTER:
err = arm_spe_pkt_desc_counter(packet, buf, buf_len);
break;
default:
/* Unknown packet type */
err = -1;
break;
}
/* Output raw data if detect any error */
if (err) {
err = 0;
arm_spe_pkt_out_string(&err, &buf_orig, &buf_len, "%s 0x%llx (%d)",
name, payload, packet->index);
}
return err;
}
| linux-master | tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arm_spe_decoder.c: ARM SPE support
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/zalloc.h>
#include "../auxtrace.h"
#include "../debug.h"
#include "../util.h"
#include "arm-spe-decoder.h"
static u64 arm_spe_calc_ip(int index, u64 payload)
{
u64 ns, el, val;
/* Instruction virtual address or Branch target address */
if (index == SPE_ADDR_PKT_HDR_INDEX_INS ||
index == SPE_ADDR_PKT_HDR_INDEX_BRANCH) {
ns = SPE_ADDR_PKT_GET_NS(payload);
el = SPE_ADDR_PKT_GET_EL(payload);
/* Clean highest byte */
payload = SPE_ADDR_PKT_ADDR_GET_BYTES_0_6(payload);
/* Fill highest byte for EL1 or EL2 (VHE) mode */
if (ns && (el == SPE_ADDR_PKT_EL1 || el == SPE_ADDR_PKT_EL2))
payload |= 0xffULL << SPE_ADDR_PKT_ADDR_BYTE7_SHIFT;
/* Data access virtual address */
} else if (index == SPE_ADDR_PKT_HDR_INDEX_DATA_VIRT) {
/* Clean tags */
payload = SPE_ADDR_PKT_ADDR_GET_BYTES_0_6(payload);
/*
* Armv8 ARM (ARM DDI 0487F.c), chapter "D10.2.1 Address packet"
* defines the data virtual address payload format: the top byte
* (bits [63:56]) is assigned as the top-byte tag, so we can only
* retrieve the address value from bits [55:0].
*
* According to Documentation/arch/arm64/memory.rst, if the specific
* pattern in bits [55:52] of the payload indicates the address falls
* in kernel space, the top byte should be fixed up; this allows the
* perf tool to parse DSO symbols for data addresses correctly.
*
* For this reason, if bits [55:52] are 0xf, fill 0xff into the
* top byte.
*/
val = SPE_ADDR_PKT_ADDR_GET_BYTE_6(payload);
if ((val & 0xf0ULL) == 0xf0ULL)
payload |= 0xffULL << SPE_ADDR_PKT_ADDR_BYTE7_SHIFT;
/* Data access physical address */
} else if (index == SPE_ADDR_PKT_HDR_INDEX_DATA_PHYS) {
/* Clean highest byte */
payload = SPE_ADDR_PKT_ADDR_GET_BYTES_0_6(payload);
} else {
static u32 seen_idx = 0;
if (!(seen_idx & BIT(index))) {
seen_idx |= BIT(index);
pr_warning("ignoring unsupported address packet index: 0x%x\n", index);
}
}
return payload;
}
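/*
* For example, a data virtual address payload of 0x00ff800012345678 has
* bits [55:52] == 0xf, so the top byte is filled with 0xff and the
* address becomes the canonical kernel pointer 0xffff800012345678.
*/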
struct arm_spe_decoder *arm_spe_decoder_new(struct arm_spe_params *params)
{
struct arm_spe_decoder *decoder;
if (!params->get_trace)
return NULL;
decoder = zalloc(sizeof(struct arm_spe_decoder));
if (!decoder)
return NULL;
decoder->get_trace = params->get_trace;
decoder->data = params->data;
return decoder;
}
void arm_spe_decoder_free(struct arm_spe_decoder *decoder)
{
free(decoder);
}
static int arm_spe_get_data(struct arm_spe_decoder *decoder)
{
struct arm_spe_buffer buffer = { .buf = 0, };
int ret;
pr_debug("Getting more data\n");
ret = decoder->get_trace(&buffer, decoder->data);
if (ret < 0)
return ret;
decoder->buf = buffer.buf;
decoder->len = buffer.len;
if (!decoder->len)
pr_debug("No more data\n");
return decoder->len;
}
static int arm_spe_get_next_packet(struct arm_spe_decoder *decoder)
{
int ret;
do {
if (!decoder->len) {
ret = arm_spe_get_data(decoder);
/* Failed to read out trace data */
if (ret <= 0)
return ret;
}
ret = arm_spe_get_packet(decoder->buf, decoder->len,
&decoder->packet);
if (ret <= 0) {
/* Move forward for 1 byte */
decoder->buf += 1;
decoder->len -= 1;
return -EBADMSG;
}
decoder->buf += ret;
decoder->len -= ret;
} while (decoder->packet.type == ARM_SPE_PAD);
return 1;
}
static int arm_spe_read_record(struct arm_spe_decoder *decoder)
{
int err;
int idx;
u64 payload, ip;
memset(&decoder->record, 0x0, sizeof(decoder->record));
decoder->record.context_id = (u64)-1;
while (1) {
err = arm_spe_get_next_packet(decoder);
if (err <= 0)
return err;
idx = decoder->packet.index;
payload = decoder->packet.payload;
switch (decoder->packet.type) {
case ARM_SPE_TIMESTAMP:
decoder->record.timestamp = payload;
return 1;
case ARM_SPE_END:
return 1;
case ARM_SPE_ADDRESS:
ip = arm_spe_calc_ip(idx, payload);
if (idx == SPE_ADDR_PKT_HDR_INDEX_INS)
decoder->record.from_ip = ip;
else if (idx == SPE_ADDR_PKT_HDR_INDEX_BRANCH)
decoder->record.to_ip = ip;
else if (idx == SPE_ADDR_PKT_HDR_INDEX_DATA_VIRT)
decoder->record.virt_addr = ip;
else if (idx == SPE_ADDR_PKT_HDR_INDEX_DATA_PHYS)
decoder->record.phys_addr = ip;
break;
case ARM_SPE_COUNTER:
if (idx == SPE_CNT_PKT_HDR_INDEX_TOTAL_LAT)
decoder->record.latency = payload;
break;
case ARM_SPE_CONTEXT:
decoder->record.context_id = payload;
break;
case ARM_SPE_OP_TYPE:
switch (idx) {
case SPE_OP_PKT_HDR_CLASS_LD_ST_ATOMIC:
decoder->record.op |= ARM_SPE_OP_LDST;
if (payload & SPE_OP_PKT_ST)
decoder->record.op |= ARM_SPE_OP_ST;
else
decoder->record.op |= ARM_SPE_OP_LD;
if (SPE_OP_PKT_IS_LDST_SVE(payload))
decoder->record.op |= ARM_SPE_OP_SVE_LDST;
break;
case SPE_OP_PKT_HDR_CLASS_OTHER:
decoder->record.op |= ARM_SPE_OP_OTHER;
if (SPE_OP_PKT_IS_OTHER_SVE_OP(payload))
decoder->record.op |= ARM_SPE_OP_SVE_OTHER;
break;
case SPE_OP_PKT_HDR_CLASS_BR_ERET:
decoder->record.op |= ARM_SPE_OP_BRANCH_ERET;
break;
default:
pr_err("Get packet error!\n");
return -1;
}
break;
case ARM_SPE_EVENTS:
if (payload & BIT(EV_L1D_REFILL))
decoder->record.type |= ARM_SPE_L1D_MISS;
if (payload & BIT(EV_L1D_ACCESS))
decoder->record.type |= ARM_SPE_L1D_ACCESS;
if (payload & BIT(EV_TLB_WALK))
decoder->record.type |= ARM_SPE_TLB_MISS;
if (payload & BIT(EV_TLB_ACCESS))
decoder->record.type |= ARM_SPE_TLB_ACCESS;
if (payload & BIT(EV_LLC_MISS))
decoder->record.type |= ARM_SPE_LLC_MISS;
if (payload & BIT(EV_LLC_ACCESS))
decoder->record.type |= ARM_SPE_LLC_ACCESS;
if (payload & BIT(EV_REMOTE_ACCESS))
decoder->record.type |= ARM_SPE_REMOTE_ACCESS;
if (payload & BIT(EV_MISPRED))
decoder->record.type |= ARM_SPE_BRANCH_MISS;
if (payload & BIT(EV_PARTIAL_PREDICATE))
decoder->record.type |= ARM_SPE_SVE_PARTIAL_PRED;
if (payload & BIT(EV_EMPTY_PREDICATE))
decoder->record.type |= ARM_SPE_SVE_EMPTY_PRED;
break;
case ARM_SPE_DATA_SOURCE:
decoder->record.source = payload;
break;
case ARM_SPE_BAD:
break;
case ARM_SPE_PAD:
break;
default:
pr_err("Get packet error!\n");
return -1;
}
}
return 0;
}
int arm_spe_decode(struct arm_spe_decoder *decoder)
{
return arm_spe_read_record(decoder);
}
| linux-master | tools/perf/util/arm-spe-decoder/arm-spe-decoder.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2022, Huawei
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define KWORK_COUNT 100
#define MAX_KWORKNAME 128
/*
* This should be in sync with "util/kwork.h"
*/
enum kwork_class_type {
KWORK_CLASS_IRQ,
KWORK_CLASS_SOFTIRQ,
KWORK_CLASS_WORKQUEUE,
KWORK_CLASS_MAX,
};
struct work_key {
__u32 type;
__u32 cpu;
__u64 id;
};
struct report_data {
__u64 nr;
__u64 total_time;
__u64 max_time;
__u64 max_time_start;
__u64 max_time_end;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(struct work_key));
__uint(value_size, MAX_KWORKNAME);
__uint(max_entries, KWORK_COUNT);
} perf_kwork_names SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(struct work_key));
__uint(value_size, sizeof(__u64));
__uint(max_entries, KWORK_COUNT);
} perf_kwork_time SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(struct work_key));
__uint(value_size, sizeof(struct report_data));
__uint(max_entries, KWORK_COUNT);
} perf_kwork_report SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u8));
__uint(max_entries, 1);
} perf_kwork_cpu_filter SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, MAX_KWORKNAME);
__uint(max_entries, 1);
} perf_kwork_name_filter SEC(".maps");
int enabled = 0;
int has_cpu_filter = 0;
int has_name_filter = 0;
static __always_inline int local_strncmp(const char *s1,
unsigned int sz, const char *s2)
{
int ret = 0;
unsigned int i;
for (i = 0; i < sz; i++) {
ret = (unsigned char)s1[i] - (unsigned char)s2[i];
if (ret || !s1[i] || !s2[i])
break;
}
return ret;
}
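/*
* Open-coded, bounded string comparison: BPF programs cannot call the
* libc strncmp() and the fixed 'sz' loop bound keeps the program
* verifiable.
*/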
static __always_inline int trace_event_match(struct work_key *key, char *name)
{
__u8 *cpu_val;
char *name_val;
__u32 zero = 0;
__u32 cpu = bpf_get_smp_processor_id();
if (!enabled)
return 0;
if (has_cpu_filter) {
cpu_val = bpf_map_lookup_elem(&perf_kwork_cpu_filter, &cpu);
if (!cpu_val)
return 0;
}
if (has_name_filter && (name != NULL)) {
name_val = bpf_map_lookup_elem(&perf_kwork_name_filter, &zero);
if (name_val &&
(local_strncmp(name_val, MAX_KWORKNAME, name) != 0)) {
return 0;
}
}
return 1;
}
static __always_inline void do_update_time(void *map, struct work_key *key,
__u64 time_start, __u64 time_end)
{
struct report_data zero, *data;
__s64 delta = time_end - time_start;
if (delta < 0)
return;
data = bpf_map_lookup_elem(map, key);
if (!data) {
__builtin_memset(&zero, 0, sizeof(zero));
bpf_map_update_elem(map, key, &zero, BPF_NOEXIST);
data = bpf_map_lookup_elem(map, key);
if (!data)
return;
}
if ((delta > data->max_time) ||
(data->max_time == 0)) {
data->max_time = delta;
data->max_time_start = time_start;
data->max_time_end = time_end;
}
data->total_time += delta;
data->nr++;
}
static __always_inline void do_update_timestart(void *map, struct work_key *key)
{
__u64 ts = bpf_ktime_get_ns();
bpf_map_update_elem(map, key, &ts, BPF_ANY);
}
static __always_inline void do_update_timeend(void *report_map, void *time_map,
struct work_key *key)
{
__u64 *time = bpf_map_lookup_elem(time_map, key);
if (time) {
bpf_map_delete_elem(time_map, key);
do_update_time(report_map, key, *time, bpf_ktime_get_ns());
}
}
static __always_inline void do_update_name(void *map,
struct work_key *key, char *name)
{
if (!bpf_map_lookup_elem(map, key))
bpf_map_update_elem(map, key, name, BPF_ANY);
}
static __always_inline int update_timestart(void *map, struct work_key *key)
{
if (!trace_event_match(key, NULL))
return 0;
do_update_timestart(map, key);
return 0;
}
static __always_inline int update_timestart_and_name(void *time_map,
void *names_map,
struct work_key *key,
char *name)
{
if (!trace_event_match(key, name))
return 0;
do_update_timestart(time_map, key);
do_update_name(names_map, key, name);
return 0;
}
static __always_inline int update_timeend(void *report_map,
void *time_map, struct work_key *key)
{
if (!trace_event_match(key, NULL))
return 0;
do_update_timeend(report_map, time_map, key);
return 0;
}
static __always_inline int update_timeend_and_name(void *report_map,
void *time_map,
void *names_map,
struct work_key *key,
char *name)
{
if (!trace_event_match(key, name))
return 0;
do_update_timeend(report_map, time_map, key);
do_update_name(names_map, key, name);
return 0;
}
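/*
* The report_* programs below measure execution time (entry -> exit) while
* the latency_* programs measure queueing latency (raise/activate -> entry);
* both store per-work results keyed by {type, cpu, id} in perf_kwork_report.
*/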
SEC("tracepoint/irq/irq_handler_entry")
int report_irq_handler_entry(struct trace_event_raw_irq_handler_entry *ctx)
{
char name[MAX_KWORKNAME];
struct work_key key = {
.type = KWORK_CLASS_IRQ,
.cpu = bpf_get_smp_processor_id(),
.id = (__u64)ctx->irq,
};
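/*
* __data_loc fields encode the dynamic array's offset within the tracepoint
* record in the low 16 bits (and its length in the high 16 bits), so mask
* to get the offset of the irq name.
*/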
void *name_addr = (void *)ctx + (ctx->__data_loc_name & 0xffff);
bpf_probe_read_kernel_str(name, sizeof(name), name_addr);
return update_timestart_and_name(&perf_kwork_time,
&perf_kwork_names, &key, name);
}
SEC("tracepoint/irq/irq_handler_exit")
int report_irq_handler_exit(struct trace_event_raw_irq_handler_exit *ctx)
{
struct work_key key = {
.type = KWORK_CLASS_IRQ,
.cpu = bpf_get_smp_processor_id(),
.id = (__u64)ctx->irq,
};
return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
}
static char softirq_name_list[NR_SOFTIRQS][MAX_KWORKNAME] = {
{ "HI" },
{ "TIMER" },
{ "NET_TX" },
{ "NET_RX" },
{ "BLOCK" },
{ "IRQ_POLL" },
{ "TASKLET" },
{ "SCHED" },
{ "HRTIMER" },
{ "RCU" },
};
SEC("tracepoint/irq/softirq_entry")
int report_softirq_entry(struct trace_event_raw_softirq *ctx)
{
unsigned int vec = ctx->vec;
struct work_key key = {
.type = KWORK_CLASS_SOFTIRQ,
.cpu = bpf_get_smp_processor_id(),
.id = (__u64)vec,
};
if (vec < NR_SOFTIRQS) {
return update_timestart_and_name(&perf_kwork_time,
&perf_kwork_names, &key,
softirq_name_list[vec]);
}
return 0;
}
SEC("tracepoint/irq/softirq_exit")
int report_softirq_exit(struct trace_event_raw_softirq *ctx)
{
struct work_key key = {
.type = KWORK_CLASS_SOFTIRQ,
.cpu = bpf_get_smp_processor_id(),
.id = (__u64)ctx->vec,
};
return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
}
SEC("tracepoint/irq/softirq_raise")
int latency_softirq_raise(struct trace_event_raw_softirq *ctx)
{
unsigned int vec = ctx->vec;
struct work_key key = {
.type = KWORK_CLASS_SOFTIRQ,
.cpu = bpf_get_smp_processor_id(),
.id = (__u64)vec,
};
if (vec < NR_SOFTIRQS) {
return update_timestart_and_name(&perf_kwork_time,
&perf_kwork_names, &key,
softirq_name_list[vec]);
}
return 0;
}
SEC("tracepoint/irq/softirq_entry")
int latency_softirq_entry(struct trace_event_raw_softirq *ctx)
{
struct work_key key = {
.type = KWORK_CLASS_SOFTIRQ,
.cpu = bpf_get_smp_processor_id(),
.id = (__u64)ctx->vec,
};
return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
}
SEC("tracepoint/workqueue/workqueue_execute_start")
int report_workqueue_execute_start(struct trace_event_raw_workqueue_execute_start *ctx)
{
struct work_key key = {
.type = KWORK_CLASS_WORKQUEUE,
.cpu = bpf_get_smp_processor_id(),
.id = (__u64)ctx->work,
};
return update_timestart(&perf_kwork_time, &key);
}
SEC("tracepoint/workqueue/workqueue_execute_end")
int report_workqueue_execute_end(struct trace_event_raw_workqueue_execute_end *ctx)
{
char name[MAX_KWORKNAME];
struct work_key key = {
.type = KWORK_CLASS_WORKQUEUE,
.cpu = bpf_get_smp_processor_id(),
.id = (__u64)ctx->work,
};
unsigned long long func_addr = (unsigned long long)ctx->function;
__builtin_memset(name, 0, sizeof(name));
bpf_snprintf(name, sizeof(name), "%ps", &func_addr, sizeof(func_addr));
return update_timeend_and_name(&perf_kwork_report, &perf_kwork_time,
&perf_kwork_names, &key, name);
}
SEC("tracepoint/workqueue/workqueue_activate_work")
int latency_workqueue_activate_work(struct trace_event_raw_workqueue_activate_work *ctx)
{
struct work_key key = {
.type = KWORK_CLASS_WORKQUEUE,
.cpu = bpf_get_smp_processor_id(),
.id = (__u64)ctx->work,
};
return update_timestart(&perf_kwork_time, &key);
}
SEC("tracepoint/workqueue/workqueue_execute_start")
int latency_workqueue_execute_start(struct trace_event_raw_workqueue_execute_start *ctx)
{
char name[MAX_KWORKNAME];
struct work_key key = {
.type = KWORK_CLASS_WORKQUEUE,
.cpu = bpf_get_smp_processor_id(),
.id = (__u64)ctx->work,
};
unsigned long long func_addr = (unsigned long long)ctx->function;
__builtin_memset(name, 0, sizeof(name));
bpf_snprintf(name, sizeof(name), "%ps", &func_addr, sizeof(func_addr));
return update_timeend_and_name(&perf_kwork_report, &perf_kwork_time,
&perf_kwork_names, &key, name);
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/perf/util/bpf_skel/kwork_trace.bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2020 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
/* map of perf event fds, num_cpu * num_metric entries */
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(int));
} events SEC(".maps");
/* readings at fentry */
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_perf_event_value));
__uint(max_entries, 1);
} fentry_readings SEC(".maps");
/* accumulated readings */
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_perf_event_value));
__uint(max_entries, 1);
} accum_readings SEC(".maps");
const volatile __u32 num_cpu = 1;
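/*
* The "XXX" in the fentry/fexit section names is only a placeholder: the
* perf tool is expected to set the real attach target (the profiled BPF
* program) via the skeleton before loading.
*/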
SEC("fentry/XXX")
int BPF_PROG(fentry_XXX)
{
__u32 key = bpf_get_smp_processor_id();
struct bpf_perf_event_value *ptr;
__u32 zero = 0;
long err;
/* look up before reading, to reduce error */
ptr = bpf_map_lookup_elem(&fentry_readings, &zero);
if (!ptr)
return 0;
err = bpf_perf_event_read_value(&events, key, ptr, sizeof(*ptr));
if (err)
return 0;
return 0;
}
static inline void
fexit_update_maps(struct bpf_perf_event_value *after)
{
struct bpf_perf_event_value *before, diff;
__u32 zero = 0;
before = bpf_map_lookup_elem(&fentry_readings, &zero);
/* only account samples with a valid fentry_reading */
if (before && before->counter) {
struct bpf_perf_event_value *accum;
diff.counter = after->counter - before->counter;
diff.enabled = after->enabled - before->enabled;
diff.running = after->running - before->running;
accum = bpf_map_lookup_elem(&accum_readings, &zero);
if (accum) {
accum->counter += diff.counter;
accum->enabled += diff.enabled;
accum->running += diff.running;
}
}
}
SEC("fexit/XXX")
int BPF_PROG(fexit_XXX)
{
struct bpf_perf_event_value reading;
__u32 cpu = bpf_get_smp_processor_id();
int err;
/* read all events before updating the maps, to reduce error */
err = bpf_perf_event_read_value(&events, cpu, &reading, sizeof(reading));
if (err)
return 0;
fexit_update_maps(&reading);
return 0;
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2022 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <asm-generic/errno-base.h>
#include "lock_data.h"
/* for collect_lock_syms(). 4096 was rejected by the verifier */
#define MAX_CPUS 1024
/* lock contention flags from include/trace/events/lock.h */
#define LCB_F_SPIN (1U << 0)
#define LCB_F_READ (1U << 1)
#define LCB_F_WRITE (1U << 2)
#define LCB_F_RT (1U << 3)
#define LCB_F_PERCPU (1U << 4)
#define LCB_F_MUTEX (1U << 5)
struct tstamp_data {
__u64 timestamp;
__u64 lock;
__u32 flags;
__s32 stack_id;
};
/* callstack storage */
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u64));
__uint(max_entries, MAX_ENTRIES);
} stacks SEC(".maps");
/* maintain timestamp at the beginning of contention */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
__type(value, struct tstamp_data);
__uint(max_entries, MAX_ENTRIES);
} tstamp SEC(".maps");
/* actual lock contention statistics */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(struct contention_key));
__uint(value_size, sizeof(struct contention_data));
__uint(max_entries, MAX_ENTRIES);
} lock_stat SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct contention_task_data));
__uint(max_entries, MAX_ENTRIES);
} task_data SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u64));
__uint(value_size, sizeof(__u32));
__uint(max_entries, MAX_ENTRIES);
} lock_syms SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u8));
__uint(max_entries, 1);
} cpu_filter SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u8));
__uint(max_entries, 1);
} task_filter SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u8));
__uint(max_entries, 1);
} type_filter SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u64));
__uint(value_size, sizeof(__u8));
__uint(max_entries, 1);
} addr_filter SEC(".maps");
struct rw_semaphore___old {
struct task_struct *owner;
} __attribute__((preserve_access_index));
struct rw_semaphore___new {
atomic_long_t owner;
} __attribute__((preserve_access_index));
struct mm_struct___old {
struct rw_semaphore mmap_sem;
} __attribute__((preserve_access_index));
struct mm_struct___new {
struct rw_semaphore mmap_lock;
} __attribute__((preserve_access_index));
/* control flags */
int enabled;
int has_cpu;
int has_task;
int has_type;
int has_addr;
int needs_callstack;
int stack_skip;
int lock_owner;
/* determine the key of lock stat */
int aggr_mode;
/* error stat */
int task_fail;
int stack_fail;
int time_fail;
int data_fail;
int task_map_full;
int data_map_full;
static inline int can_record(u64 *ctx)
{
if (has_cpu) {
__u32 cpu = bpf_get_smp_processor_id();
__u8 *ok;
ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
if (!ok)
return 0;
}
if (has_task) {
__u8 *ok;
__u32 pid = bpf_get_current_pid_tgid();
ok = bpf_map_lookup_elem(&task_filter, &pid);
if (!ok)
return 0;
}
if (has_type) {
__u8 *ok;
__u32 flags = (__u32)ctx[1];
ok = bpf_map_lookup_elem(&type_filter, &flags);
if (!ok)
return 0;
}
if (has_addr) {
__u8 *ok;
__u64 addr = ctx[0];
ok = bpf_map_lookup_elem(&addr_filter, &addr);
if (!ok)
return 0;
}
return 1;
}
static inline int update_task_data(struct task_struct *task)
{
struct contention_task_data *p;
int pid, err;
err = bpf_core_read(&pid, sizeof(pid), &task->pid);
if (err)
return -1;
p = bpf_map_lookup_elem(&task_data, &pid);
if (p == NULL && !task_map_full) {
struct contention_task_data data = {};
BPF_CORE_READ_STR_INTO(&data.comm, task, comm);
if (bpf_map_update_elem(&task_data, &pid, &data, BPF_NOEXIST) == -E2BIG)
task_map_full = 1;
}
return 0;
}
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif
static inline struct task_struct *get_lock_owner(__u64 lock, __u32 flags)
{
struct task_struct *task;
__u64 owner = 0;
if (flags & LCB_F_MUTEX) {
struct mutex *mutex = (void *)lock;
owner = BPF_CORE_READ(mutex, owner.counter);
} else if (flags == LCB_F_READ || flags == LCB_F_WRITE) {
/*
* Support for the BPF_TYPE_MATCHES argument to the
* __builtin_preserve_type_info builtin was added at some point during
* development of clang 15 and it's what is needed for
* bpf_core_type_matches.
*/
#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
if (bpf_core_type_matches(struct rw_semaphore___old)) {
struct rw_semaphore___old *rwsem = (void *)lock;
owner = (unsigned long)BPF_CORE_READ(rwsem, owner);
} else if (bpf_core_type_matches(struct rw_semaphore___new)) {
struct rw_semaphore___new *rwsem = (void *)lock;
owner = BPF_CORE_READ(rwsem, owner.counter);
}
#else
/* assume new struct */
struct rw_semaphore *rwsem = (void *)lock;
owner = BPF_CORE_READ(rwsem, owner.counter);
#endif
}
if (!owner)
return NULL;
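/*
* Both mutex and rwsem stash flag bits in the low bits of the owner word,
* so mask them off to recover the task_struct pointer.
*/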
task = (void *)(owner & ~7UL);
return task;
}
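/*
* Tag a few well-known locks by comparing addresses so the report can show
* a symbolic name: the current task's mmap lock for rwsems and its
* sighand->siglock for spinlocks.
*/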
static inline __u32 check_lock_type(__u64 lock, __u32 flags)
{
struct task_struct *curr;
struct mm_struct___old *mm_old;
struct mm_struct___new *mm_new;
switch (flags) {
case LCB_F_READ: /* rwsem */
case LCB_F_WRITE:
curr = bpf_get_current_task_btf();
if (curr->mm == NULL)
break;
mm_new = (void *)curr->mm;
if (bpf_core_field_exists(mm_new->mmap_lock)) {
if (&mm_new->mmap_lock == (void *)lock)
return LCD_F_MMAP_LOCK;
break;
}
mm_old = (void *)curr->mm;
if (bpf_core_field_exists(mm_old->mmap_sem)) {
if (&mm_old->mmap_sem == (void *)lock)
return LCD_F_MMAP_LOCK;
}
break;
case LCB_F_SPIN: /* spinlock */
curr = bpf_get_current_task_btf();
if (&curr->sighand->siglock == (void *)lock)
return LCD_F_SIGHAND_LOCK;
break;
default:
break;
}
return 0;
}
SEC("tp_btf/contention_begin")
int contention_begin(u64 *ctx)
{
__u32 pid;
struct tstamp_data *pelem;
if (!enabled || !can_record(ctx))
return 0;
pid = bpf_get_current_pid_tgid();
pelem = bpf_map_lookup_elem(&tstamp, &pid);
if (pelem && pelem->lock)
return 0;
if (pelem == NULL) {
struct tstamp_data zero = {};
bpf_map_update_elem(&tstamp, &pid, &zero, BPF_ANY);
pelem = bpf_map_lookup_elem(&tstamp, &pid);
if (pelem == NULL) {
__sync_fetch_and_add(&task_fail, 1);
return 0;
}
}
pelem->timestamp = bpf_ktime_get_ns();
pelem->lock = (__u64)ctx[0];
pelem->flags = (__u32)ctx[1];
if (needs_callstack) {
pelem->stack_id = bpf_get_stackid(ctx, &stacks,
BPF_F_FAST_STACK_CMP | stack_skip);
if (pelem->stack_id < 0)
__sync_fetch_and_add(&stack_fail, 1);
} else if (aggr_mode == LOCK_AGGR_TASK) {
struct task_struct *task;
if (lock_owner) {
task = get_lock_owner(pelem->lock, pelem->flags);
/* The flags field is not used anymore, reuse it to pass the owner pid. */
if (task)
pelem->flags = BPF_CORE_READ(task, pid);
else
pelem->flags = -1U;
} else {
task = bpf_get_current_task_btf();
}
if (task) {
if (update_task_data(task) < 0 && lock_owner)
pelem->flags = -1U;
}
}
return 0;
}
SEC("tp_btf/contention_end")
int contention_end(u64 *ctx)
{
__u32 pid;
struct tstamp_data *pelem;
struct contention_key key = {};
struct contention_data *data;
__u64 duration;
if (!enabled)
return 0;
pid = bpf_get_current_pid_tgid();
pelem = bpf_map_lookup_elem(&tstamp, &pid);
if (!pelem || pelem->lock != ctx[0])
return 0;
duration = bpf_ktime_get_ns() - pelem->timestamp;
if ((__s64)duration < 0) {
bpf_map_delete_elem(&tstamp, &pid);
__sync_fetch_and_add(&time_fail, 1);
return 0;
}
switch (aggr_mode) {
case LOCK_AGGR_CALLER:
key.stack_id = pelem->stack_id;
break;
case LOCK_AGGR_TASK:
if (lock_owner)
key.pid = pelem->flags;
else
key.pid = pid;
if (needs_callstack)
key.stack_id = pelem->stack_id;
break;
case LOCK_AGGR_ADDR:
key.lock_addr = pelem->lock;
if (needs_callstack)
key.stack_id = pelem->stack_id;
break;
default:
/* should not happen */
return 0;
}
data = bpf_map_lookup_elem(&lock_stat, &key);
if (!data) {
if (data_map_full) {
bpf_map_delete_elem(&tstamp, &pid);
__sync_fetch_and_add(&data_fail, 1);
return 0;
}
struct contention_data first = {
.total_time = duration,
.max_time = duration,
.min_time = duration,
.count = 1,
.flags = pelem->flags,
};
int err;
if (aggr_mode == LOCK_AGGR_ADDR)
first.flags |= check_lock_type(pelem->lock, pelem->flags);
err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
if (err < 0) {
if (err == -E2BIG)
data_map_full = 1;
__sync_fetch_and_add(&data_fail, 1);
}
bpf_map_delete_elem(&tstamp, &pid);
return 0;
}
__sync_fetch_and_add(&data->total_time, duration);
__sync_fetch_and_add(&data->count, 1);
/* FIXME: need atomic operations */
if (data->max_time < duration)
data->max_time = duration;
if (data->min_time > duration)
data->min_time = duration;
bpf_map_delete_elem(&tstamp, &pid);
return 0;
}
extern struct rq runqueues __ksym;
struct rq___old {
raw_spinlock_t lock;
} __attribute__((preserve_access_index));
struct rq___new {
raw_spinlock_t __lock;
} __attribute__((preserve_access_index));
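/*
* Record each cpu's runqueue lock address in the lock_syms map so it can be
* reported as LOCK_CLASS_RQLOCK; CO-RE picks the right field name since the
* rq lock was renamed from 'lock' to '__lock'. This program is expected to
* be triggered once from userspace rather than by a real tracepoint.
*/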
SEC("raw_tp/bpf_test_finish")
int BPF_PROG(collect_lock_syms)
{
__u64 lock_addr, lock_off;
__u32 lock_flag;
if (bpf_core_field_exists(struct rq___new, __lock))
lock_off = offsetof(struct rq___new, __lock);
else
lock_off = offsetof(struct rq___old, lock);
for (int i = 0; i < MAX_CPUS; i++) {
struct rq *rq = bpf_per_cpu_ptr(&runqueues, i);
if (rq == NULL)
break;
lock_addr = (__u64)(void *)rq + lock_off;
lock_flag = LOCK_CLASS_RQLOCK;
bpf_map_update_elem(&lock_syms, &lock_addr, &lock_flag, BPF_ANY);
}
return 0;
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/perf/util/bpf_skel/lock_contention.bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Facebook
// Copyright (c) 2021 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#define MAX_LEVELS 10 // max cgroup hierarchy level: arbitrary
#define MAX_EVENTS 32 // max events per cgroup: arbitrary
// NOTE: many of the maps and global data will be modified before loading
// from userspace (the perf tool) using the skeleton helpers.
// single set of global perf events to measure
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(int));
__uint(max_entries, 1);
} events SEC(".maps");
// from cgroup id to event index
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u64));
__uint(value_size, sizeof(__u32));
__uint(max_entries, 1);
} cgrp_idx SEC(".maps");
// per-cpu event snapshots to calculate delta
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_perf_event_value));
} prev_readings SEC(".maps");
// aggregated event values for each cgroup (per-cpu)
// will be read from the user-space
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_perf_event_value));
} cgrp_readings SEC(".maps");
/* new kernel cgroup definition */
struct cgroup___new {
int level;
struct cgroup *ancestors[];
} __attribute__((preserve_access_index));
/* old kernel cgroup definition */
struct cgroup___old {
int level;
u64 ancestor_ids[];
} __attribute__((preserve_access_index));
const volatile __u32 num_events = 1;
const volatile __u32 num_cpus = 1;
int enabled = 0;
int use_cgroup_v2 = 0;
int perf_subsys_id = -1;
static inline __u64 get_cgroup_v1_ancestor_id(struct cgroup *cgrp, int level)
{
/* recast pointer to capture new type for compiler */
struct cgroup___new *cgrp_new = (void *)cgrp;
if (bpf_core_field_exists(cgrp_new->ancestors)) {
return BPF_CORE_READ(cgrp_new, ancestors[level], kn, id);
} else {
/* recast pointer to capture old type for compiler */
struct cgroup___old *cgrp_old = (void *)cgrp;
return BPF_CORE_READ(cgrp_old, ancestor_ids[level]);
}
}
static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
{
struct task_struct *p = (void *)bpf_get_current_task();
struct cgroup *cgrp;
register int i = 0;
__u32 *elem;
int level;
int cnt;
if (perf_subsys_id == -1) {
#if __has_builtin(__builtin_preserve_enum_value)
perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
perf_event_cgrp_id);
#else
perf_subsys_id = perf_event_cgrp_id;
#endif
}
cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);
level = BPF_CORE_READ(cgrp, level);
for (cnt = 0; i < MAX_LEVELS; i++) {
__u64 cgrp_id;
if (i > level)
break;
// convert cgroup-id to a map index
cgrp_id = get_cgroup_v1_ancestor_id(cgrp, i);
elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);
if (!elem)
continue;
cgrps[cnt++] = *elem;
if (cnt == size)
break;
}
return cnt;
}
static inline int get_cgroup_v2_idx(__u32 *cgrps, int size)
{
register int i = 0;
__u32 *elem;
int cnt;
for (cnt = 0; i < MAX_LEVELS; i++) {
__u64 cgrp_id = bpf_get_current_ancestor_cgroup_id(i);
if (cgrp_id == 0)
break;
// convert cgroup-id to a map index
elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);
if (!elem)
continue;
cgrps[cnt++] = *elem;
if (cnt == size)
break;
}
return cnt;
}
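/*
* Read every perf event on this cpu, compute the delta against the previous
* per-cpu snapshot, and add it to each ancestor cgroup of the current task
* so counts aggregate up the cgroup hierarchy.
*/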
static int bperf_cgroup_count(void)
{
register __u32 idx = 0; // to have it in a register to pass BPF verifier
register int c = 0;
struct bpf_perf_event_value val, delta, *prev_val, *cgrp_val;
__u32 cpu = bpf_get_smp_processor_id();
__u32 cgrp_idx[MAX_LEVELS];
int cgrp_cnt;
__u32 key, cgrp;
long err;
if (use_cgroup_v2)
cgrp_cnt = get_cgroup_v2_idx(cgrp_idx, MAX_LEVELS);
else
cgrp_cnt = get_cgroup_v1_idx(cgrp_idx, MAX_LEVELS);
for ( ; idx < MAX_EVENTS; idx++) {
if (idx == num_events)
break;
// XXX: do not pass idx directly (for verifier)
key = idx;
// this is per-cpu array for diff
prev_val = bpf_map_lookup_elem(&prev_readings, &key);
if (!prev_val) {
val.counter = val.enabled = val.running = 0;
bpf_map_update_elem(&prev_readings, &key, &val, BPF_ANY);
prev_val = bpf_map_lookup_elem(&prev_readings, &key);
if (!prev_val)
continue;
}
// read from global perf_event array
key = idx * num_cpus + cpu;
err = bpf_perf_event_read_value(&events, key, &val, sizeof(val));
if (err)
continue;
if (enabled) {
delta.counter = val.counter - prev_val->counter;
delta.enabled = val.enabled - prev_val->enabled;
delta.running = val.running - prev_val->running;
for (c = 0; c < MAX_LEVELS; c++) {
if (c == cgrp_cnt)
break;
cgrp = cgrp_idx[c];
// aggregate the result by cgroup
key = cgrp * num_events + idx;
cgrp_val = bpf_map_lookup_elem(&cgrp_readings, &key);
if (cgrp_val) {
cgrp_val->counter += delta.counter;
cgrp_val->enabled += delta.enabled;
cgrp_val->running += delta.running;
} else {
bpf_map_update_elem(&cgrp_readings, &key,
&delta, BPF_ANY);
}
}
}
*prev_val = val;
}
return 0;
}
// This will be attached to cgroup-switches event for each cpu
SEC("perf_event")
int BPF_PROG(on_cgrp_switch)
{
return bperf_cgroup_count();
}
SEC("raw_tp/sched_switch")
int BPF_PROG(trigger_read)
{
return bperf_cgroup_count();
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/perf/util/bpf_skel/bperf_cgroup.bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2023 Red Hat
#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
unsigned int nr_uprobes;
SEC("uprobe")
int BPF_UPROBE(empty)
{
return 0;
}
SEC("uprobe")
int BPF_UPROBE(trace_printk)
{
char fmt[] = "perf bench uprobe %u";
bpf_trace_printk(fmt, sizeof(fmt), ++nr_uprobes);
return 0;
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/perf/util/bpf_skel/bench_uprobe.bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2023 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "sample-filter.h"
/* BPF map that will be filled by user space */
struct filters {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct perf_bpf_filter_entry);
__uint(max_entries, MAX_FILTERS);
} filters SEC(".maps");
int dropped;
void *bpf_cast_to_kern_ctx(void *) __ksym;
/* new kernel perf_sample_data definition */
struct perf_sample_data___new {
__u64 sample_flags;
} __attribute__((preserve_access_index));
/* new kernel perf_mem_data_src definition */
union perf_mem_data_src___new {
__u64 val;
struct {
__u64 mem_op:5, /* type of opcode */
mem_lvl:14, /* memory hierarchy level */
mem_snoop:5, /* snoop mode */
mem_lock:2, /* lock instr */
mem_dtlb:7, /* tlb access */
mem_lvl_num:4, /* memory hierarchy level number */
mem_remote:1, /* remote */
mem_snoopx:2, /* snoop mode, ext */
mem_blk:3, /* access blocked */
mem_hops:3, /* hop level */
mem_rsvd:18;
};
};
/* helper function to return the given perf sample data */
static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
struct perf_bpf_filter_entry *entry)
{
struct perf_sample_data___new *data = (void *)kctx->data;
if (!bpf_core_field_exists(data->sample_flags) ||
(data->sample_flags & entry->flags) == 0)
return 0;
switch (entry->flags) {
case PERF_SAMPLE_IP:
return kctx->data->ip;
case PERF_SAMPLE_ID:
return kctx->data->id;
case PERF_SAMPLE_TID:
if (entry->part)
return kctx->data->tid_entry.pid;
else
return kctx->data->tid_entry.tid;
case PERF_SAMPLE_CPU:
return kctx->data->cpu_entry.cpu;
case PERF_SAMPLE_TIME:
return kctx->data->time;
case PERF_SAMPLE_ADDR:
return kctx->data->addr;
case PERF_SAMPLE_PERIOD:
return kctx->data->period;
case PERF_SAMPLE_TRANSACTION:
return kctx->data->txn;
case PERF_SAMPLE_WEIGHT_STRUCT:
if (entry->part == 1)
return kctx->data->weight.var1_dw;
if (entry->part == 2)
return kctx->data->weight.var2_w;
if (entry->part == 3)
return kctx->data->weight.var3_w;
/* fall through */
case PERF_SAMPLE_WEIGHT:
return kctx->data->weight.full;
case PERF_SAMPLE_PHYS_ADDR:
return kctx->data->phys_addr;
case PERF_SAMPLE_CODE_PAGE_SIZE:
return kctx->data->code_page_size;
case PERF_SAMPLE_DATA_PAGE_SIZE:
return kctx->data->data_page_size;
case PERF_SAMPLE_DATA_SRC:
if (entry->part == 1)
return kctx->data->data_src.mem_op;
if (entry->part == 2)
return kctx->data->data_src.mem_lvl_num;
if (entry->part == 3) {
__u32 snoop = kctx->data->data_src.mem_snoop;
__u32 snoopx = kctx->data->data_src.mem_snoopx;
return (snoopx << 5) | snoop;
}
if (entry->part == 4)
return kctx->data->data_src.mem_remote;
if (entry->part == 5)
return kctx->data->data_src.mem_lock;
if (entry->part == 6)
return kctx->data->data_src.mem_dtlb;
if (entry->part == 7)
return kctx->data->data_src.mem_blk;
if (entry->part == 8) {
union perf_mem_data_src___new *data = (void *)&kctx->data->data_src;
if (bpf_core_field_exists(data->mem_hops))
return data->mem_hops;
return 0;
}
/* return the whole word */
return kctx->data->data_src.val;
default:
break;
}
return 0;
}
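/*
* Evaluate one filter term: outside a group a failed comparison drops the
* sample right away, while inside a GROUP_BEGIN/GROUP_END pair any matching
* term marks the whole group as passing.
*/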
#define CHECK_RESULT(data, op, val) \
if (!(data op val)) { \
if (!in_group) \
goto drop; \
} else if (in_group) { \
group_result = 1; \
}
/* BPF program to be called from perf event overflow handler */
SEC("perf_event")
int perf_sample_filter(void *ctx)
{
struct bpf_perf_event_data_kern *kctx;
struct perf_bpf_filter_entry *entry;
__u64 sample_data;
int in_group = 0;
int group_result = 0;
int i;
kctx = bpf_cast_to_kern_ctx(ctx);
for (i = 0; i < MAX_FILTERS; i++) {
int key = i; /* needed for verifier :( */
entry = bpf_map_lookup_elem(&filters, &key);
if (entry == NULL)
break;
sample_data = perf_get_sample(kctx, entry);
switch (entry->op) {
case PBF_OP_EQ:
CHECK_RESULT(sample_data, ==, entry->value)
break;
case PBF_OP_NEQ:
CHECK_RESULT(sample_data, !=, entry->value)
break;
case PBF_OP_GT:
CHECK_RESULT(sample_data, >, entry->value)
break;
case PBF_OP_GE:
CHECK_RESULT(sample_data, >=, entry->value)
break;
case PBF_OP_LT:
CHECK_RESULT(sample_data, <, entry->value)
break;
case PBF_OP_LE:
CHECK_RESULT(sample_data, <=, entry->value)
break;
case PBF_OP_AND:
CHECK_RESULT(sample_data, &, entry->value)
break;
case PBF_OP_GROUP_BEGIN:
in_group = 1;
group_result = 0;
break;
case PBF_OP_GROUP_END:
if (group_result == 0)
goto drop;
in_group = 0;
break;
}
}
/* generate sample data */
return 1;
drop:
__sync_fetch_and_add(&dropped, 1);
return 0;
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/perf/util/bpf_skel/sample_filter.bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bperf_u.h"
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_perf_event_value));
__uint(max_entries, 1);
} diff_readings SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_perf_event_value));
__uint(max_entries, 1);
} accum_readings SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} filter SEC(".maps");
enum bperf_filter_type type = 0;
int enabled = 0;
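/*
* Attached with fexit to the bperf leader program: map the current cpu, pid
* or tgid to a slot in accum_readings according to the filter type and add
* the per-cpu delta the leader just produced.
*/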
SEC("fexit/XXX")
int BPF_PROG(fexit_XXX)
{
struct bpf_perf_event_value *diff_val, *accum_val;
__u32 filter_key, zero = 0;
__u32 *accum_key;
if (!enabled)
return 0;
switch (type) {
case BPERF_FILTER_GLOBAL:
accum_key = &zero;
goto do_add;
case BPERF_FILTER_CPU:
filter_key = bpf_get_smp_processor_id();
break;
case BPERF_FILTER_PID:
filter_key = bpf_get_current_pid_tgid() & 0xffffffff;
break;
case BPERF_FILTER_TGID:
filter_key = bpf_get_current_pid_tgid() >> 32;
break;
default:
return 0;
}
accum_key = bpf_map_lookup_elem(&filter, &filter_key);
if (!accum_key)
return 0;
do_add:
diff_val = bpf_map_lookup_elem(&diff_readings, &zero);
if (!diff_val)
return 0;
accum_val = bpf_map_lookup_elem(&accum_readings, accum_key);
if (!accum_val)
return 0;
accum_val->counter += diff_val->counter;
accum_val->enabled += diff_val->enabled;
accum_val->running += diff_val->running;
return 0;
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/perf/util/bpf_skel/bperf_follower.bpf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
*
* This exactly matches what is marshalled into the raw_syscall:sys_enter
* payload expected by the 'perf trace' beautifiers.
*/
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <linux/limits.h>
/**
* is_power_of_2() - check if a value is a power of two
* @n: the value to check
*
* Determine whether some value is a power of two, where zero is *not*
* considered a power of two. Return: true if @n is a power of 2, otherwise
* false.
*/
#define is_power_of_2(n) (n != 0 && ((n & (n - 1)) == 0))
#define MAX_CPUS 4096
// FIXME: These should come from system headers
typedef char bool;
typedef int pid_t;
typedef long long int __s64;
typedef __s64 time64_t;
struct timespec64 {
time64_t tv_sec;
long int tv_nsec;
};
/* bpf-output associated map */
struct __augmented_syscalls__ {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__type(key, int);
__type(value, __u32);
__uint(max_entries, MAX_CPUS);
} __augmented_syscalls__ SEC(".maps");
/*
* What to augment at entry?
*
* Pointer arg payloads (filenames, etc) passed from userspace to the kernel
*/
struct syscalls_sys_enter {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 512);
} syscalls_sys_enter SEC(".maps");
/*
* What to augment at exit?
*
* Pointer arg payloads returned from the kernel (struct stat, etc) to userspace.
*/
struct syscalls_sys_exit {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 512);
} syscalls_sys_exit SEC(".maps");
struct syscall_enter_args {
unsigned long long common_tp_fields;
long syscall_nr;
unsigned long args[6];
};
struct syscall_exit_args {
unsigned long long common_tp_fields;
long syscall_nr;
long ret;
};
struct augmented_arg {
unsigned int size;
int err;
char value[PATH_MAX];
};
struct pids_filtered {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, pid_t);
__type(value, bool);
__uint(max_entries, 64);
} pids_filtered SEC(".maps");
/*
* Desired design of maximum size and alignment (see RFC2553)
*/
#define SS_MAXSIZE 128 /* Implementation specific max size */
typedef unsigned short sa_family_t;
/*
* FIXME: Should come from system headers
*
* The definition uses anonymous union and struct in order to control the
* default alignment.
*/
struct sockaddr_storage {
union {
struct {
sa_family_t ss_family; /* address family */
/* Following field(s) are implementation specific */
char __data[SS_MAXSIZE - sizeof(unsigned short)];
/* space to achieve desired size, */
/* _SS_MAXSIZE value minus size of ss_family */
};
void *__align; /* implementation specific desired alignment */
};
};
struct augmented_args_payload {
struct syscall_enter_args args;
union {
struct {
struct augmented_arg arg, arg2;
};
struct sockaddr_storage saddr;
char __data[sizeof(struct augmented_arg)];
};
};
// We need more tmp space than the BPF stack can give us
struct augmented_args_tmp {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__type(key, int);
__type(value, struct augmented_args_payload);
__uint(max_entries, 1);
} augmented_args_tmp SEC(".maps");
static inline struct augmented_args_payload *augmented_args_payload(void)
{
int key = 0;
return bpf_map_lookup_elem(&augmented_args_tmp, &key);
}
static inline int augmented__output(void *ctx, struct augmented_args_payload *args, int len)
{
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
return bpf_perf_event_output(ctx, &__augmented_syscalls__, BPF_F_CURRENT_CPU, args, len);
}
static inline
unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
{
unsigned int augmented_len = sizeof(*augmented_arg);
int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg);
augmented_arg->size = augmented_arg->err = 0;
/*
* probe_read_str may return < 0, e.g. -EFAULT, in which case we leave the
* error in augmented_arg->err so that userspace can notice it.
*/
if (string_len > 0) {
augmented_len -= sizeof(augmented_arg->value) - string_len;
_Static_assert(is_power_of_2(sizeof(augmented_arg->value)), "sizeof(augmented_arg->value) needs to be a power of two");
augmented_len &= sizeof(augmented_arg->value) - 1;
augmented_arg->size = string_len;
} else {
/*
* So that userspace notices the error while still being able
* to skip this augmented arg record
*/
augmented_arg->err = string_len;
augmented_len = offsetof(struct augmented_arg, value);
}
return augmented_len;
}
SEC("tp/raw_syscalls/sys_enter")
int syscall_unaugmented(struct syscall_enter_args *args)
{
return 1;
}
/*
* These will be tail_called from SEC("raw_syscalls:sys_enter"), so will find in
* augmented_args_tmp what was read by that raw_syscalls:sys_enter and go
* on from there, reading the first syscall arg as a string, i.e. open's
* filename.
*/
SEC("tp/syscalls/sys_enter_connect")
int sys_enter_connect(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *sockaddr_arg = (const void *)args->args[1];
unsigned int socklen = args->args[2];
unsigned int len = sizeof(augmented_args->args);
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
_Static_assert(is_power_of_2(sizeof(augmented_args->saddr)), "sizeof(augmented_args->saddr) needs to be a power of two");
socklen &= sizeof(augmented_args->saddr) - 1;
bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
SEC("tp/syscalls/sys_enter_sendto")
int sys_enter_sendto(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *sockaddr_arg = (const void *)args->args[4];
unsigned int socklen = args->args[5];
unsigned int len = sizeof(augmented_args->args);
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
socklen &= sizeof(augmented_args->saddr) - 1;
bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
SEC("tp/syscalls/sys_enter_open")
int sys_enter_open(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *filename_arg = (const void *)args->args[0];
unsigned int len = sizeof(augmented_args->args);
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
len += augmented_arg__read_str(&augmented_args->arg, filename_arg, sizeof(augmented_args->arg.value));
return augmented__output(args, augmented_args, len);
}
SEC("tp/syscalls/sys_enter_openat")
int sys_enter_openat(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *filename_arg = (const void *)args->args[1];
unsigned int len = sizeof(augmented_args->args);
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
len += augmented_arg__read_str(&augmented_args->arg, filename_arg, sizeof(augmented_args->arg.value));
return augmented__output(args, augmented_args, len);
}
SEC("tp/syscalls/sys_enter_rename")
int sys_enter_rename(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *oldpath_arg = (const void *)args->args[0],
*newpath_arg = (const void *)args->args[1];
unsigned int len = sizeof(augmented_args->args), oldpath_len;
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
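/* copy the two path strings back to back, right after the raw syscall args */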
oldpath_len = augmented_arg__read_str(&augmented_args->arg, oldpath_arg, sizeof(augmented_args->arg.value));
len += oldpath_len + augmented_arg__read_str((void *)(&augmented_args->arg) + oldpath_len, newpath_arg, sizeof(augmented_args->arg.value));
return augmented__output(args, augmented_args, len);
}
SEC("tp/syscalls/sys_enter_renameat")
int sys_enter_renameat(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *oldpath_arg = (const void *)args->args[1],
*newpath_arg = (const void *)args->args[3];
unsigned int len = sizeof(augmented_args->args), oldpath_len;
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
oldpath_len = augmented_arg__read_str(&augmented_args->arg, oldpath_arg, sizeof(augmented_args->arg.value));
len += oldpath_len + augmented_arg__read_str((void *)(&augmented_args->arg) + oldpath_len, newpath_arg, sizeof(augmented_args->arg.value));
return augmented__output(args, augmented_args, len);
}
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
// we need just the start, get the size to then copy it
struct perf_event_attr_size {
__u32 type;
/*
* Size of the attr structure, for fwd/bwd compat.
*/
__u32 size;
};
SEC("tp/syscalls/sys_enter_perf_event_open")
int sys_enter_perf_event_open(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const struct perf_event_attr_size *attr = (const struct perf_event_attr_size *)args->args[0], *attr_read;
unsigned int len = sizeof(augmented_args->args);
if (augmented_args == NULL)
goto failure;
if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0)
goto failure;
attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
__u32 size = attr_read->size;
if (!size)
size = PERF_ATTR_SIZE_VER0;
if (size > sizeof(augmented_args->__data))
goto failure;
// Now that we read attr->size and tested it against the size limits, read it completely
if (bpf_probe_read(&augmented_args->__data, size, attr) < 0)
goto failure;
return augmented__output(args, augmented_args, len + size);
failure:
return 1; /* Failure: don't filter */
}
SEC("tp/syscalls/sys_enter_clock_nanosleep")
int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *rqtp_arg = (const void *)args->args[2];
unsigned int len = sizeof(augmented_args->args);
__u32 size = sizeof(struct timespec64);
if (augmented_args == NULL)
goto failure;
if (size > sizeof(augmented_args->__data))
goto failure;
bpf_probe_read(&augmented_args->__data, size, rqtp_arg);
return augmented__output(args, augmented_args, len + size);
failure:
return 1; /* Failure: don't filter */
}
static pid_t getpid(void)
{
return bpf_get_current_pid_tgid();
}
static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
{
return bpf_map_lookup_elem(pids, &pid) != NULL;
}
SEC("tp/raw_syscalls/sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args;
/*
* The amount of data that ends up in the perf ring buffer, if this event
* is not filtered out by one of pid_filter__has(), syscall->enabled, etc,
* starts with the non-augmented raw syscall payload, i.e.
* sizeof(augmented_args->args).
*
* The syscall specific augmenters tail-called below append their augmented
* payloads right after that initial, non-augmented raw_syscalls:sys_enter
* payload.
*/
if (pid_filter__has(&pids_filtered, getpid()))
return 0;
augmented_args = augmented_args_payload();
if (augmented_args == NULL)
return 1;
bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
/*
* Jump to the syscall specific augmenter, which may be the default
* "!raw_syscalls:unaugmented" one that just returns 1 so that the
* unaugmented tracepoint payload is used.
*/
bpf_tail_call(args, &syscalls_sys_enter, augmented_args->args.syscall_nr);
// If not found on the PROG_ARRAY syscalls map, then we're filtering it:
return 0;
}
SEC("tp/raw_syscalls/sys_exit")
int sys_exit(struct syscall_exit_args *args)
{
struct syscall_exit_args exit_args;
if (pid_filter__has(&pids_filtered, getpid()))
return 0;
bpf_probe_read(&exit_args, sizeof(exit_args), args);
/*
* Jump to the syscall specific return augmenter, which may be the default
* "!raw_syscalls:unaugmented" one that just returns 1 so that the
* unaugmented tracepoint payload is used.
*/
bpf_tail_call(args, &syscalls_sys_exit, exit_args.syscall_nr);
/*
* If not found on the PROG_ARRAY syscalls map, then we're filtering it:
*/
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2022 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
/* task->flags for off-cpu analysis */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
/* task->state for off-cpu analysis */
#define TASK_INTERRUPTIBLE 0x0001
#define TASK_UNINTERRUPTIBLE 0x0002
/* create a new thread */
#define CLONE_THREAD 0x10000
#define MAX_STACKS 32
#define MAX_ENTRIES 102400
struct tstamp_data {
__u32 stack_id;
__u32 state;
__u64 timestamp;
};
struct offcpu_key {
__u32 pid;
__u32 tgid;
__u32 stack_id;
__u32 state;
__u64 cgroup_id;
};
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(key_size, sizeof(__u32));
__uint(value_size, MAX_STACKS * sizeof(__u64));
__uint(max_entries, MAX_ENTRIES);
} stacks SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct tstamp_data);
} tstamp SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(struct offcpu_key));
__uint(value_size, sizeof(__u64));
__uint(max_entries, MAX_ENTRIES);
} off_cpu SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u8));
__uint(max_entries, 1);
} cpu_filter SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u8));
__uint(max_entries, 1);
} task_filter SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u64));
__uint(value_size, sizeof(__u8));
__uint(max_entries, 1);
} cgroup_filter SEC(".maps");
/* new kernel task_struct definition */
struct task_struct___new {
long __state;
} __attribute__((preserve_access_index));
/* old kernel task_struct definition */
struct task_struct___old {
long state;
} __attribute__((preserve_access_index));
int enabled = 0;
int has_cpu = 0;
int has_task = 0;
int has_cgroup = 0;
int uses_tgid = 0;
const volatile bool has_prev_state = false;
const volatile bool needs_cgroup = false;
const volatile bool uses_cgroup_v1 = false;
int perf_subsys_id = -1;
/*
* Old kernel used to call it task_struct->state and now it's '__state'.
* Use BPF CO-RE "ignored suffix rule" to deal with it like below:
*
* https://nakryiko.com/posts/bpf-core-reference-guide/#handling-incompatible-field-and-type-changes
*/
static inline int get_task_state(struct task_struct *t)
{
/* recast pointer to capture new type for compiler */
struct task_struct___new *t_new = (void *)t;
if (bpf_core_field_exists(t_new->__state)) {
return BPF_CORE_READ(t_new, __state);
} else {
/* recast pointer to capture old type for compiler */
struct task_struct___old *t_old = (void *)t;
return BPF_CORE_READ(t_old, state);
}
}
static inline __u64 get_cgroup_id(struct task_struct *t)
{
struct cgroup *cgrp;
if (!uses_cgroup_v1)
return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);
if (perf_subsys_id == -1) {
#if __has_builtin(__builtin_preserve_enum_value)
perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
perf_event_cgrp_id);
#else
perf_subsys_id = perf_event_cgrp_id;
#endif
}
cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);
return BPF_CORE_READ(cgrp, kn, id);
}
static inline int can_record(struct task_struct *t, int state)
{
/* kernel threads don't have user stack */
if (t->flags & PF_KTHREAD)
return 0;
if (state != TASK_INTERRUPTIBLE &&
state != TASK_UNINTERRUPTIBLE)
return 0;
if (has_cpu) {
__u32 cpu = bpf_get_smp_processor_id();
__u8 *ok;
ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
if (!ok)
return 0;
}
if (has_task) {
__u8 *ok;
__u32 pid;
if (uses_tgid)
pid = t->tgid;
else
pid = t->pid;
ok = bpf_map_lookup_elem(&task_filter, &pid);
if (!ok)
return 0;
}
if (has_cgroup) {
__u8 *ok;
__u64 cgrp_id = get_cgroup_id(t);
ok = bpf_map_lookup_elem(&cgroup_filter, &cgrp_id);
if (!ok)
return 0;
}
return 1;
}
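/*
* sched_switch body: stamp the time, state and user stack when @prev goes
* off-cpu (if it passes the filters), and when @next comes back on-cpu
* account the elapsed off-cpu time into the off_cpu map.
*/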
static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
struct task_struct *next, int state)
{
__u64 ts;
__u32 stack_id;
struct tstamp_data *pelem;
ts = bpf_ktime_get_ns();
if (!can_record(prev, state))
goto next;
stack_id = bpf_get_stackid(ctx, &stacks,
BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK);
pelem = bpf_task_storage_get(&tstamp, prev, NULL,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!pelem)
goto next;
pelem->timestamp = ts;
pelem->state = state;
pelem->stack_id = stack_id;
next:
pelem = bpf_task_storage_get(&tstamp, next, NULL, 0);
if (pelem && pelem->timestamp) {
struct offcpu_key key = {
.pid = next->pid,
.tgid = next->tgid,
.stack_id = pelem->stack_id,
.state = pelem->state,
.cgroup_id = needs_cgroup ? get_cgroup_id(next) : 0,
};
__u64 delta = ts - pelem->timestamp;
__u64 *total;
total = bpf_map_lookup_elem(&off_cpu, &key);
if (total)
*total += delta;
else
bpf_map_update_elem(&off_cpu, &key, &delta, BPF_ANY);
/* prevent the timestamp from being reused later */
pelem->timestamp = 0;
}
return 0;
}
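/*
* When filtering by tgid, propagate the filter to newly forked processes:
* if the parent's tgid is already tracked and the new task starts its own
* thread group (no CLONE_THREAD), start tracking the child's tgid too.
*/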
SEC("tp_btf/task_newtask")
int on_newtask(u64 *ctx)
{
struct task_struct *task;
u64 clone_flags;
u32 pid;
u8 val = 1;
if (!uses_tgid)
return 0;
task = (struct task_struct *)bpf_get_current_task();
pid = BPF_CORE_READ(task, tgid);
if (!bpf_map_lookup_elem(&task_filter, &pid))
return 0;
task = (struct task_struct *)ctx[0];
clone_flags = ctx[1];
pid = task->tgid;
if (!(clone_flags & CLONE_THREAD))
bpf_map_update_elem(&task_filter, &pid, &val, BPF_NOEXIST);
return 0;
}
SEC("tp_btf/sched_switch")
int on_switch(u64 *ctx)
{
struct task_struct *prev, *next;
int prev_state;
if (!enabled)
return 0;
prev = (struct task_struct *)ctx[1];
next = (struct task_struct *)ctx[2];
if (has_prev_state)
prev_state = (int)ctx[3];
else
prev_state = get_task_state(prev);
return off_cpu_stat(ctx, prev, next, prev_state & 0xff);
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/perf/util/bpf_skel/off_cpu.bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(int));
__uint(map_flags, BPF_F_PRESERVE_ELEMS);
} events SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_perf_event_value));
__uint(max_entries, 1);
} prev_readings SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_perf_event_value));
__uint(max_entries, 1);
} diff_readings SEC(".maps");
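/*
* On every sched_switch, read this cpu's perf event and publish the delta
* since the previous reading in diff_readings; the bperf follower programs
* (see bperf_follower.bpf.c) accumulate those deltas per counting target.
*/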
SEC("raw_tp/sched_switch")
int BPF_PROG(on_switch)
{
struct bpf_perf_event_value val, *prev_val, *diff_val;
__u32 key = bpf_get_smp_processor_id();
__u32 zero = 0;
long err;
prev_val = bpf_map_lookup_elem(&prev_readings, &zero);
if (!prev_val)
return 0;
diff_val = bpf_map_lookup_elem(&diff_readings, &zero);
if (!diff_val)
return 0;
err = bpf_perf_event_read_value(&events, key, &val, sizeof(val));
if (err)
return 0;
diff_val->counter = val.counter - prev_val->counter;
diff_val->enabled = val.enabled - prev_val->enabled;
diff_val->running = val.running - prev_val->running;
*prev_val = val;
return 0;
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/perf/util/bpf_skel/bperf_leader.bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
// This should be in sync with "util/ftrace.h"
#define NUM_BUCKET 22
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u64));
__uint(value_size, sizeof(__u64));
__uint(max_entries, 10000);
} functime SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u8));
__uint(max_entries, 1);
} cpu_filter SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u8));
__uint(max_entries, 1);
} task_filter SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u64));
__uint(max_entries, NUM_BUCKET);
} latency SEC(".maps");
int enabled = 0;
int has_cpu = 0;
int has_task = 0;
int use_nsec = 0;
SEC("kprobe/func")
int BPF_PROG(func_begin)
{
__u64 key, now;
if (!enabled)
return 0;
key = bpf_get_current_pid_tgid();
if (has_cpu) {
__u32 cpu = bpf_get_smp_processor_id();
__u8 *ok;
ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
if (!ok)
return 0;
}
if (has_task) {
__u32 pid = key & 0xffffffff;
__u8 *ok;
ok = bpf_map_lookup_elem(&task_filter, &pid);
if (!ok)
return 0;
}
now = bpf_ktime_get_ns();
// overwrite timestamp for nested functions
bpf_map_update_elem(&functime, &key, &now, BPF_ANY);
return 0;
}
SEC("kretprobe/func")
int BPF_PROG(func_end)
{
__u64 tid;
__u64 *start;
__u64 cmp_base = use_nsec ? 1 : 1000;
if (!enabled)
return 0;
tid = bpf_get_current_pid_tgid();
start = bpf_map_lookup_elem(&functime, &tid);
if (start) {
__s64 delta = bpf_ktime_get_ns() - *start;
__u32 key;
__u64 *hist;
bpf_map_delete_elem(&functime, &tid);
if (delta < 0)
return 0;
// calculate index using delta
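// log2-scale histogram: find the first bucket N with delta < (cmp_base << N)
// (usec steps by default, nsec with use_nsec); the last bucket catches
// everything larger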
for (key = 0; key < (NUM_BUCKET - 1); key++) {
if (delta < (cmp_base << key))
break;
}
hist = bpf_map_lookup_elem(&latency, &key);
if (!hist)
return 0;
*hist += 1;
}
return 0;
}
| linux-master | tools/perf/util/bpf_skel/func_latency.bpf.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_PERF_REGS_SUPPORT
#include "../perf_regs.h"
#include "../../../arch/arm64/include/uapi/asm/perf_regs.h"
const char *__perf_reg_name_arm64(int id)
{
switch (id) {
case PERF_REG_ARM64_X0:
return "x0";
case PERF_REG_ARM64_X1:
return "x1";
case PERF_REG_ARM64_X2:
return "x2";
case PERF_REG_ARM64_X3:
return "x3";
case PERF_REG_ARM64_X4:
return "x4";
case PERF_REG_ARM64_X5:
return "x5";
case PERF_REG_ARM64_X6:
return "x6";
case PERF_REG_ARM64_X7:
return "x7";
case PERF_REG_ARM64_X8:
return "x8";
case PERF_REG_ARM64_X9:
return "x9";
case PERF_REG_ARM64_X10:
return "x10";
case PERF_REG_ARM64_X11:
return "x11";
case PERF_REG_ARM64_X12:
return "x12";
case PERF_REG_ARM64_X13:
return "x13";
case PERF_REG_ARM64_X14:
return "x14";
case PERF_REG_ARM64_X15:
return "x15";
case PERF_REG_ARM64_X16:
return "x16";
case PERF_REG_ARM64_X17:
return "x17";
case PERF_REG_ARM64_X18:
return "x18";
case PERF_REG_ARM64_X19:
return "x19";
case PERF_REG_ARM64_X20:
return "x20";
case PERF_REG_ARM64_X21:
return "x21";
case PERF_REG_ARM64_X22:
return "x22";
case PERF_REG_ARM64_X23:
return "x23";
case PERF_REG_ARM64_X24:
return "x24";
case PERF_REG_ARM64_X25:
return "x25";
case PERF_REG_ARM64_X26:
return "x26";
case PERF_REG_ARM64_X27:
return "x27";
case PERF_REG_ARM64_X28:
return "x28";
case PERF_REG_ARM64_X29:
return "x29";
case PERF_REG_ARM64_SP:
return "sp";
case PERF_REG_ARM64_LR:
return "lr";
case PERF_REG_ARM64_PC:
return "pc";
case PERF_REG_ARM64_VG:
return "vg";
default:
return NULL;
}
return NULL;
}
uint64_t __perf_reg_ip_arm64(void)
{
return PERF_REG_ARM64_PC;
}
uint64_t __perf_reg_sp_arm64(void)
{
return PERF_REG_ARM64_SP;
}
#endif
| linux-master | tools/perf/util/perf-regs-arch/perf_regs_aarch64.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_PERF_REGS_SUPPORT
#include "../perf_regs.h"
#include "../../../arch/loongarch/include/uapi/asm/perf_regs.h"
const char *__perf_reg_name_loongarch(int id)
{
switch (id) {
case PERF_REG_LOONGARCH_PC:
return "PC";
case PERF_REG_LOONGARCH_R1:
return "%r1";
case PERF_REG_LOONGARCH_R2:
return "%r2";
case PERF_REG_LOONGARCH_R3:
return "%r3";
case PERF_REG_LOONGARCH_R4:
return "%r4";
case PERF_REG_LOONGARCH_R5:
return "%r5";
case PERF_REG_LOONGARCH_R6:
return "%r6";
case PERF_REG_LOONGARCH_R7:
return "%r7";
case PERF_REG_LOONGARCH_R8:
return "%r8";
case PERF_REG_LOONGARCH_R9:
return "%r9";
case PERF_REG_LOONGARCH_R10:
return "%r10";
case PERF_REG_LOONGARCH_R11:
return "%r11";
case PERF_REG_LOONGARCH_R12:
return "%r12";
case PERF_REG_LOONGARCH_R13:
return "%r13";
case PERF_REG_LOONGARCH_R14:
return "%r14";
case PERF_REG_LOONGARCH_R15:
return "%r15";
case PERF_REG_LOONGARCH_R16:
return "%r16";
case PERF_REG_LOONGARCH_R17:
return "%r17";
case PERF_REG_LOONGARCH_R18:
return "%r18";
case PERF_REG_LOONGARCH_R19:
return "%r19";
case PERF_REG_LOONGARCH_R20:
return "%r20";
case PERF_REG_LOONGARCH_R21:
return "%r21";
case PERF_REG_LOONGARCH_R22:
return "%r22";
case PERF_REG_LOONGARCH_R23:
return "%r23";
case PERF_REG_LOONGARCH_R24:
return "%r24";
case PERF_REG_LOONGARCH_R25:
return "%r25";
case PERF_REG_LOONGARCH_R26:
return "%r26";
case PERF_REG_LOONGARCH_R27:
return "%r27";
case PERF_REG_LOONGARCH_R28:
return "%r28";
case PERF_REG_LOONGARCH_R29:
return "%r29";
case PERF_REG_LOONGARCH_R30:
return "%r30";
case PERF_REG_LOONGARCH_R31:
return "%r31";
default:
break;
}
return NULL;
}
uint64_t __perf_reg_ip_loongarch(void)
{
return PERF_REG_LOONGARCH_PC;
}
uint64_t __perf_reg_sp_loongarch(void)
{
return PERF_REG_LOONGARCH_R3;
}
#endif
| linux-master | tools/perf/util/perf-regs-arch/perf_regs_loongarch.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_PERF_REGS_SUPPORT
#include "../perf_regs.h"
#include "../../../arch/s390/include/uapi/asm/perf_regs.h"
const char *__perf_reg_name_s390(int id)
{
switch (id) {
case PERF_REG_S390_R0:
return "R0";
case PERF_REG_S390_R1:
return "R1";
case PERF_REG_S390_R2:
return "R2";
case PERF_REG_S390_R3:
return "R3";
case PERF_REG_S390_R4:
return "R4";
case PERF_REG_S390_R5:
return "R5";
case PERF_REG_S390_R6:
return "R6";
case PERF_REG_S390_R7:
return "R7";
case PERF_REG_S390_R8:
return "R8";
case PERF_REG_S390_R9:
return "R9";
case PERF_REG_S390_R10:
return "R10";
case PERF_REG_S390_R11:
return "R11";
case PERF_REG_S390_R12:
return "R12";
case PERF_REG_S390_R13:
return "R13";
case PERF_REG_S390_R14:
return "R14";
case PERF_REG_S390_R15:
return "R15";
case PERF_REG_S390_FP0:
return "FP0";
case PERF_REG_S390_FP1:
return "FP1";
case PERF_REG_S390_FP2:
return "FP2";
case PERF_REG_S390_FP3:
return "FP3";
case PERF_REG_S390_FP4:
return "FP4";
case PERF_REG_S390_FP5:
return "FP5";
case PERF_REG_S390_FP6:
return "FP6";
case PERF_REG_S390_FP7:
return "FP7";
case PERF_REG_S390_FP8:
return "FP8";
case PERF_REG_S390_FP9:
return "FP9";
case PERF_REG_S390_FP10:
return "FP10";
case PERF_REG_S390_FP11:
return "FP11";
case PERF_REG_S390_FP12:
return "FP12";
case PERF_REG_S390_FP13:
return "FP13";
case PERF_REG_S390_FP14:
return "FP14";
case PERF_REG_S390_FP15:
return "FP15";
case PERF_REG_S390_MASK:
return "MASK";
case PERF_REG_S390_PC:
return "PC";
default:
return NULL;
}
return NULL;
}
uint64_t __perf_reg_ip_s390(void)
{
return PERF_REG_S390_PC;
}
uint64_t __perf_reg_sp_s390(void)
{
return PERF_REG_S390_R15;
}
#endif
| linux-master | tools/perf/util/perf-regs-arch/perf_regs_s390.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_PERF_REGS_SUPPORT
#include "../perf_regs.h"
#include "../../arch/csky/include/uapi/asm/perf_regs.h"
const char *__perf_reg_name_csky(int id)
{
switch (id) {
case PERF_REG_CSKY_A0:
return "a0";
case PERF_REG_CSKY_A1:
return "a1";
case PERF_REG_CSKY_A2:
return "a2";
case PERF_REG_CSKY_A3:
return "a3";
case PERF_REG_CSKY_REGS0:
return "regs0";
case PERF_REG_CSKY_REGS1:
return "regs1";
case PERF_REG_CSKY_REGS2:
return "regs2";
case PERF_REG_CSKY_REGS3:
return "regs3";
case PERF_REG_CSKY_REGS4:
return "regs4";
case PERF_REG_CSKY_REGS5:
return "regs5";
case PERF_REG_CSKY_REGS6:
return "regs6";
case PERF_REG_CSKY_REGS7:
return "regs7";
case PERF_REG_CSKY_REGS8:
return "regs8";
case PERF_REG_CSKY_REGS9:
return "regs9";
case PERF_REG_CSKY_SP:
return "sp";
case PERF_REG_CSKY_LR:
return "lr";
case PERF_REG_CSKY_PC:
return "pc";
#if defined(__CSKYABIV2__)
case PERF_REG_CSKY_EXREGS0:
return "exregs0";
case PERF_REG_CSKY_EXREGS1:
return "exregs1";
case PERF_REG_CSKY_EXREGS2:
return "exregs2";
case PERF_REG_CSKY_EXREGS3:
return "exregs3";
case PERF_REG_CSKY_EXREGS4:
return "exregs4";
case PERF_REG_CSKY_EXREGS5:
return "exregs5";
case PERF_REG_CSKY_EXREGS6:
return "exregs6";
case PERF_REG_CSKY_EXREGS7:
return "exregs7";
case PERF_REG_CSKY_EXREGS8:
return "exregs8";
case PERF_REG_CSKY_EXREGS9:
return "exregs9";
case PERF_REG_CSKY_EXREGS10:
return "exregs10";
case PERF_REG_CSKY_EXREGS11:
return "exregs11";
case PERF_REG_CSKY_EXREGS12:
return "exregs12";
case PERF_REG_CSKY_EXREGS13:
return "exregs13";
case PERF_REG_CSKY_EXREGS14:
return "exregs14";
case PERF_REG_CSKY_TLS:
return "tls";
case PERF_REG_CSKY_HI:
return "hi";
case PERF_REG_CSKY_LO:
return "lo";
#endif
default:
return NULL;
}
return NULL;
}
uint64_t __perf_reg_ip_csky(void)
{
return PERF_REG_CSKY_PC;
}
uint64_t __perf_reg_sp_csky(void)
{
return PERF_REG_CSKY_SP;
}
#endif
| linux-master | tools/perf/util/perf-regs-arch/perf_regs_csky.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_PERF_REGS_SUPPORT
#include "../perf_regs.h"
#include "../../../arch/mips/include/uapi/asm/perf_regs.h"
const char *__perf_reg_name_mips(int id)
{
switch (id) {
case PERF_REG_MIPS_PC:
return "PC";
case PERF_REG_MIPS_R1:
return "$1";
case PERF_REG_MIPS_R2:
return "$2";
case PERF_REG_MIPS_R3:
return "$3";
case PERF_REG_MIPS_R4:
return "$4";
case PERF_REG_MIPS_R5:
return "$5";
case PERF_REG_MIPS_R6:
return "$6";
case PERF_REG_MIPS_R7:
return "$7";
case PERF_REG_MIPS_R8:
return "$8";
case PERF_REG_MIPS_R9:
return "$9";
case PERF_REG_MIPS_R10:
return "$10";
case PERF_REG_MIPS_R11:
return "$11";
case PERF_REG_MIPS_R12:
return "$12";
case PERF_REG_MIPS_R13:
return "$13";
case PERF_REG_MIPS_R14:
return "$14";
case PERF_REG_MIPS_R15:
return "$15";
case PERF_REG_MIPS_R16:
return "$16";
case PERF_REG_MIPS_R17:
return "$17";
case PERF_REG_MIPS_R18:
return "$18";
case PERF_REG_MIPS_R19:
return "$19";
case PERF_REG_MIPS_R20:
return "$20";
case PERF_REG_MIPS_R21:
return "$21";
case PERF_REG_MIPS_R22:
return "$22";
case PERF_REG_MIPS_R23:
return "$23";
case PERF_REG_MIPS_R24:
return "$24";
case PERF_REG_MIPS_R25:
return "$25";
case PERF_REG_MIPS_R28:
return "$28";
case PERF_REG_MIPS_R29:
return "$29";
case PERF_REG_MIPS_R30:
return "$30";
case PERF_REG_MIPS_R31:
return "$31";
default:
break;
}
return NULL;
}
uint64_t __perf_reg_ip_mips(void)
{
return PERF_REG_MIPS_PC;
}
uint64_t __perf_reg_sp_mips(void)
{
return PERF_REG_MIPS_R29;
}
#endif
| linux-master | tools/perf/util/perf-regs-arch/perf_regs_mips.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_PERF_REGS_SUPPORT
#include "../perf_regs.h"
#include "../../../arch/arm/include/uapi/asm/perf_regs.h"
const char *__perf_reg_name_arm(int id)
{
switch (id) {
case PERF_REG_ARM_R0:
return "r0";
case PERF_REG_ARM_R1:
return "r1";
case PERF_REG_ARM_R2:
return "r2";
case PERF_REG_ARM_R3:
return "r3";
case PERF_REG_ARM_R4:
return "r4";
case PERF_REG_ARM_R5:
return "r5";
case PERF_REG_ARM_R6:
return "r6";
case PERF_REG_ARM_R7:
return "r7";
case PERF_REG_ARM_R8:
return "r8";
case PERF_REG_ARM_R9:
return "r9";
case PERF_REG_ARM_R10:
return "r10";
case PERF_REG_ARM_FP:
return "fp";
case PERF_REG_ARM_IP:
return "ip";
case PERF_REG_ARM_SP:
return "sp";
case PERF_REG_ARM_LR:
return "lr";
case PERF_REG_ARM_PC:
return "pc";
default:
return NULL;
}
return NULL;
}
uint64_t __perf_reg_ip_arm(void)
{
return PERF_REG_ARM_PC;
}
uint64_t __perf_reg_sp_arm(void)
{
return PERF_REG_ARM_SP;
}
#endif
| linux-master | tools/perf/util/perf-regs-arch/perf_regs_arm.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_PERF_REGS_SUPPORT
#include "../perf_regs.h"
#include "../../../arch/riscv/include/uapi/asm/perf_regs.h"
const char *__perf_reg_name_riscv(int id)
{
switch (id) {
case PERF_REG_RISCV_PC:
return "pc";
case PERF_REG_RISCV_RA:
return "ra";
case PERF_REG_RISCV_SP:
return "sp";
case PERF_REG_RISCV_GP:
return "gp";
case PERF_REG_RISCV_TP:
return "tp";
case PERF_REG_RISCV_T0:
return "t0";
case PERF_REG_RISCV_T1:
return "t1";
case PERF_REG_RISCV_T2:
return "t2";
case PERF_REG_RISCV_S0:
return "s0";
case PERF_REG_RISCV_S1:
return "s1";
case PERF_REG_RISCV_A0:
return "a0";
case PERF_REG_RISCV_A1:
return "a1";
case PERF_REG_RISCV_A2:
return "a2";
case PERF_REG_RISCV_A3:
return "a3";
case PERF_REG_RISCV_A4:
return "a4";
case PERF_REG_RISCV_A5:
return "a5";
case PERF_REG_RISCV_A6:
return "a6";
case PERF_REG_RISCV_A7:
return "a7";
case PERF_REG_RISCV_S2:
return "s2";
case PERF_REG_RISCV_S3:
return "s3";
case PERF_REG_RISCV_S4:
return "s4";
case PERF_REG_RISCV_S5:
return "s5";
case PERF_REG_RISCV_S6:
return "s6";
case PERF_REG_RISCV_S7:
return "s7";
case PERF_REG_RISCV_S8:
return "s8";
case PERF_REG_RISCV_S9:
return "s9";
case PERF_REG_RISCV_S10:
return "s10";
case PERF_REG_RISCV_S11:
return "s11";
case PERF_REG_RISCV_T3:
return "t3";
case PERF_REG_RISCV_T4:
return "t4";
case PERF_REG_RISCV_T5:
return "t5";
case PERF_REG_RISCV_T6:
return "t6";
default:
return NULL;
}
return NULL;
}
uint64_t __perf_reg_ip_riscv(void)
{
return PERF_REG_RISCV_PC;
}
uint64_t __perf_reg_sp_riscv(void)
{
return PERF_REG_RISCV_SP;
}
#endif
| linux-master | tools/perf/util/perf-regs-arch/perf_regs_riscv.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_PERF_REGS_SUPPORT
#include "../perf_regs.h"
#include "../../../arch/powerpc/include/uapi/asm/perf_regs.h"
const char *__perf_reg_name_powerpc(int id)
{
switch (id) {
case PERF_REG_POWERPC_R0:
return "r0";
case PERF_REG_POWERPC_R1:
return "r1";
case PERF_REG_POWERPC_R2:
return "r2";
case PERF_REG_POWERPC_R3:
return "r3";
case PERF_REG_POWERPC_R4:
return "r4";
case PERF_REG_POWERPC_R5:
return "r5";
case PERF_REG_POWERPC_R6:
return "r6";
case PERF_REG_POWERPC_R7:
return "r7";
case PERF_REG_POWERPC_R8:
return "r8";
case PERF_REG_POWERPC_R9:
return "r9";
case PERF_REG_POWERPC_R10:
return "r10";
case PERF_REG_POWERPC_R11:
return "r11";
case PERF_REG_POWERPC_R12:
return "r12";
case PERF_REG_POWERPC_R13:
return "r13";
case PERF_REG_POWERPC_R14:
return "r14";
case PERF_REG_POWERPC_R15:
return "r15";
case PERF_REG_POWERPC_R16:
return "r16";
case PERF_REG_POWERPC_R17:
return "r17";
case PERF_REG_POWERPC_R18:
return "r18";
case PERF_REG_POWERPC_R19:
return "r19";
case PERF_REG_POWERPC_R20:
return "r20";
case PERF_REG_POWERPC_R21:
return "r21";
case PERF_REG_POWERPC_R22:
return "r22";
case PERF_REG_POWERPC_R23:
return "r23";
case PERF_REG_POWERPC_R24:
return "r24";
case PERF_REG_POWERPC_R25:
return "r25";
case PERF_REG_POWERPC_R26:
return "r26";
case PERF_REG_POWERPC_R27:
return "r27";
case PERF_REG_POWERPC_R28:
return "r28";
case PERF_REG_POWERPC_R29:
return "r29";
case PERF_REG_POWERPC_R30:
return "r30";
case PERF_REG_POWERPC_R31:
return "r31";
case PERF_REG_POWERPC_NIP:
return "nip";
case PERF_REG_POWERPC_MSR:
return "msr";
case PERF_REG_POWERPC_ORIG_R3:
return "orig_r3";
case PERF_REG_POWERPC_CTR:
return "ctr";
case PERF_REG_POWERPC_LINK:
return "link";
case PERF_REG_POWERPC_XER:
return "xer";
case PERF_REG_POWERPC_CCR:
return "ccr";
case PERF_REG_POWERPC_SOFTE:
return "softe";
case PERF_REG_POWERPC_TRAP:
return "trap";
case PERF_REG_POWERPC_DAR:
return "dar";
case PERF_REG_POWERPC_DSISR:
return "dsisr";
case PERF_REG_POWERPC_SIER:
return "sier";
case PERF_REG_POWERPC_MMCRA:
return "mmcra";
case PERF_REG_POWERPC_MMCR0:
return "mmcr0";
case PERF_REG_POWERPC_MMCR1:
return "mmcr1";
case PERF_REG_POWERPC_MMCR2:
return "mmcr2";
case PERF_REG_POWERPC_MMCR3:
return "mmcr3";
case PERF_REG_POWERPC_SIER2:
return "sier2";
case PERF_REG_POWERPC_SIER3:
return "sier3";
case PERF_REG_POWERPC_PMC1:
return "pmc1";
case PERF_REG_POWERPC_PMC2:
return "pmc2";
case PERF_REG_POWERPC_PMC3:
return "pmc3";
case PERF_REG_POWERPC_PMC4:
return "pmc4";
case PERF_REG_POWERPC_PMC5:
return "pmc5";
case PERF_REG_POWERPC_PMC6:
return "pmc6";
case PERF_REG_POWERPC_SDAR:
return "sdar";
case PERF_REG_POWERPC_SIAR:
return "siar";
default:
break;
}
return NULL;
}
uint64_t __perf_reg_ip_powerpc(void)
{
return PERF_REG_POWERPC_NIP;
}
uint64_t __perf_reg_sp_powerpc(void)
{
return PERF_REG_POWERPC_R1;
}
#endif
| linux-master | tools/perf/util/perf-regs-arch/perf_regs_powerpc.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_PERF_REGS_SUPPORT
#include "../perf_regs.h"
#include "../../../arch/x86/include/uapi/asm/perf_regs.h"
const char *__perf_reg_name_x86(int id)
{
switch (id) {
case PERF_REG_X86_AX:
return "AX";
case PERF_REG_X86_BX:
return "BX";
case PERF_REG_X86_CX:
return "CX";
case PERF_REG_X86_DX:
return "DX";
case PERF_REG_X86_SI:
return "SI";
case PERF_REG_X86_DI:
return "DI";
case PERF_REG_X86_BP:
return "BP";
case PERF_REG_X86_SP:
return "SP";
case PERF_REG_X86_IP:
return "IP";
case PERF_REG_X86_FLAGS:
return "FLAGS";
case PERF_REG_X86_CS:
return "CS";
case PERF_REG_X86_SS:
return "SS";
case PERF_REG_X86_DS:
return "DS";
case PERF_REG_X86_ES:
return "ES";
case PERF_REG_X86_FS:
return "FS";
case PERF_REG_X86_GS:
return "GS";
case PERF_REG_X86_R8:
return "R8";
case PERF_REG_X86_R9:
return "R9";
case PERF_REG_X86_R10:
return "R10";
case PERF_REG_X86_R11:
return "R11";
case PERF_REG_X86_R12:
return "R12";
case PERF_REG_X86_R13:
return "R13";
case PERF_REG_X86_R14:
return "R14";
case PERF_REG_X86_R15:
return "R15";
#define XMM(x) \
case PERF_REG_X86_XMM ## x: \
case PERF_REG_X86_XMM ## x + 1: \
return "XMM" #x;
XMM(0)
XMM(1)
XMM(2)
XMM(3)
XMM(4)
XMM(5)
XMM(6)
XMM(7)
XMM(8)
XMM(9)
XMM(10)
XMM(11)
XMM(12)
XMM(13)
XMM(14)
XMM(15)
#undef XMM
default:
return NULL;
}
return NULL;
}
uint64_t __perf_reg_ip_x86(void)
{
return PERF_REG_X86_IP;
}
uint64_t __perf_reg_sp_x86(void)
{
return PERF_REG_X86_SP;
}
#endif
| linux-master | tools/perf/util/perf-regs-arch/perf_regs_x86.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * This file sets up defines to compile an arch-specific binary from the
* generic one.
*
* The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch
* name and the definition of this function is included directly from
* 'arch/arm64/util/unwind-libunwind.c', to make sure that this function
* is defined no matter what arch the host is.
*
* Finally, the arch specific unwind methods are exported which will
* be assigned to each arm64 thread.
*/
#define REMOTE_UNWIND_LIBUNWIND
/* Define arch specific functions & regs for libunwind, should be
* defined before including "unwind.h"
*/
#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arm64_reg_id(regnum)
#include "unwind.h"
#include "libunwind-aarch64.h"
#define perf_event_arm_regs perf_event_arm64_regs
#include <../../../arch/arm64/include/uapi/asm/perf_regs.h>
#undef perf_event_arm_regs
#include "../../arch/arm64/util/unwind-libunwind.c"
/* NO_LIBUNWIND_DEBUG_FRAME is a feature flag for local libunwind,
* assign NO_LIBUNWIND_DEBUG_FRAME_AARCH64 to it for compiling arm64
* unwind methods.
*/
#undef NO_LIBUNWIND_DEBUG_FRAME
#ifdef NO_LIBUNWIND_DEBUG_FRAME_AARCH64
#define NO_LIBUNWIND_DEBUG_FRAME
#endif
#include "util/unwind-libunwind-local.c"
struct unwind_libunwind_ops *
arm64_unwind_libunwind_ops = &_unwind_libunwind_ops;
| linux-master | tools/perf/util/libunwind/arm64.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * This file sets up defines to compile an arch-specific binary from the
* generic one.
*
* The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch
* name and the definition of this function is included directly from
* 'arch/x86/util/unwind-libunwind.c', to make sure that this function
* is defined no matter what arch the host is.
*
* Finally, the arch specific unwind methods are exported which will
* be assigned to each x86 thread.
*/
#define REMOTE_UNWIND_LIBUNWIND
/* Define arch specific functions & regs for libunwind, should be
* defined before including "unwind.h"
*/
#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__x86_reg_id(regnum)
#include "unwind.h"
#include "libunwind-x86.h"
#include <../../../../arch/x86/include/uapi/asm/perf_regs.h>
/* HAVE_ARCH_X86_64_SUPPORT is used in 'arch/x86/util/unwind-libunwind.c'
* for x86_32, we undef it to compile code for x86_32 only.
*/
#undef HAVE_ARCH_X86_64_SUPPORT
#include "../../arch/x86/util/unwind-libunwind.c"
/* Explicitly define NO_LIBUNWIND_DEBUG_FRAME, because non-ARM has no
* dwarf_find_debug_frame() function.
*/
#ifndef NO_LIBUNWIND_DEBUG_FRAME
#define NO_LIBUNWIND_DEBUG_FRAME
#endif
#include "util/unwind-libunwind-local.c"
struct unwind_libunwind_ops *
x86_32_unwind_libunwind_ops = &_unwind_libunwind_ops;
| linux-master | tools/perf/util/libunwind/x86_32.c |
/*
* trace-event-python. Feed trace events to an embedded Python interpreter.
*
* Copyright (C) 2010 Tom Zanussi <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <Python.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/compiler.h>
#include <linux/time64.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include "../build-id.h"
#include "../counts.h"
#include "../debug.h"
#include "../dso.h"
#include "../callchain.h"
#include "../env.h"
#include "../evsel.h"
#include "../event.h"
#include "../thread.h"
#include "../comm.h"
#include "../machine.h"
#include "../db-export.h"
#include "../thread-stack.h"
#include "../trace-event.h"
#include "../call-path.h"
#include "map.h"
#include "symbol.h"
#include "thread_map.h"
#include "print_binary.h"
#include "stat.h"
#include "mem-events.h"
#include "util/perf_regs.h"
#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
PyString_FromString(arg)
#define _PyUnicode_FromStringAndSize(arg1, arg2) \
PyString_FromStringAndSize((arg1), (arg2))
#define _PyBytes_FromStringAndSize(arg1, arg2) \
PyString_FromStringAndSize((arg1), (arg2))
#define _PyLong_FromLong(arg) \
PyInt_FromLong(arg)
#define _PyLong_AsLong(arg) \
PyInt_AsLong(arg)
#define _PyCapsule_New(arg1, arg2, arg3) \
PyCObject_FromVoidPtr((arg1), (arg2))
PyMODINIT_FUNC initperf_trace_context(void);
#else
#define _PyUnicode_FromString(arg) \
PyUnicode_FromString(arg)
#define _PyUnicode_FromStringAndSize(arg1, arg2) \
PyUnicode_FromStringAndSize((arg1), (arg2))
#define _PyBytes_FromStringAndSize(arg1, arg2) \
PyBytes_FromStringAndSize((arg1), (arg2))
#define _PyLong_FromLong(arg) \
PyLong_FromLong(arg)
#define _PyLong_AsLong(arg) \
PyLong_AsLong(arg)
#define _PyCapsule_New(arg1, arg2, arg3) \
PyCapsule_New((arg1), (arg2), (arg3))
PyMODINIT_FUNC PyInit_perf_trace_context(void);
#endif
#ifdef HAVE_LIBTRACEEVENT
#define TRACE_EVENT_TYPE_MAX \
((1 << (sizeof(unsigned short) * 8)) - 1)
#define N_COMMON_FIELDS 7
static char *cur_field_name;
static int zero_flag_atom;
#endif
#define MAX_FIELDS 64
extern struct scripting_context *scripting_context;
static PyObject *main_module, *main_dict;
struct tables {
struct db_export dbe;
PyObject *evsel_handler;
PyObject *machine_handler;
PyObject *thread_handler;
PyObject *comm_handler;
PyObject *comm_thread_handler;
PyObject *dso_handler;
PyObject *symbol_handler;
PyObject *branch_type_handler;
PyObject *sample_handler;
PyObject *call_path_handler;
PyObject *call_return_handler;
PyObject *synth_handler;
PyObject *context_switch_handler;
bool db_export_mode;
};
static struct tables tables_global;
static void handler_call_die(const char *handler_name) __noreturn;
static void handler_call_die(const char *handler_name)
{
PyErr_Print();
Py_FatalError("problem in Python trace event handler");
// Py_FatalError does not return
// but we have to make the compiler happy
abort();
}
/*
* Insert val into the dictionary and decrement the reference counter.
* This is necessary for dictionaries since PyDict_SetItemString() does not
* steal a reference, as opposed to PyTuple_SetItem().
*/
static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
{
PyDict_SetItemString(dict, key, val);
Py_DECREF(val);
}
static PyObject *get_handler(const char *handler_name)
{
PyObject *handler;
handler = PyDict_GetItemString(main_dict, handler_name);
if (handler && !PyCallable_Check(handler))
return NULL;
return handler;
}
static void call_object(PyObject *handler, PyObject *args, const char *die_msg)
{
PyObject *retval;
retval = PyObject_CallObject(handler, args);
if (retval == NULL)
handler_call_die(die_msg);
Py_DECREF(retval);
}
static void try_call_object(const char *handler_name, PyObject *args)
{
PyObject *handler;
handler = get_handler(handler_name);
if (handler)
call_object(handler, args, handler_name);
}
#ifdef HAVE_LIBTRACEEVENT
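/*
 * Number of positional parameters the Python handler was defined with,
 * used later to decide whether to pass the optional perf_sample dict.
 */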
static int get_argument_count(PyObject *handler)
{
int arg_count = 0;
/*
* The attribute for the code object is func_code in Python 2,
* whereas it is __code__ in Python 3.0+.
*/
PyObject *code_obj = PyObject_GetAttrString(handler,
"func_code");
if (PyErr_Occurred()) {
PyErr_Clear();
code_obj = PyObject_GetAttrString(handler,
"__code__");
}
PyErr_Clear();
if (code_obj) {
PyObject *arg_count_obj = PyObject_GetAttrString(code_obj,
"co_argcount");
if (arg_count_obj) {
arg_count = (int) _PyLong_AsLong(arg_count_obj);
Py_DECREF(arg_count_obj);
}
Py_DECREF(code_obj);
}
return arg_count;
}
static void define_value(enum tep_print_arg_type field_type,
const char *ev_name,
const char *field_name,
const char *field_value,
const char *field_str)
{
const char *handler_name = "define_flag_value";
PyObject *t;
unsigned long long value;
unsigned n = 0;
if (field_type == TEP_PRINT_SYMBOL)
handler_name = "define_symbolic_value";
t = PyTuple_New(4);
if (!t)
Py_FatalError("couldn't create Python tuple");
value = eval_flag(field_value);
PyTuple_SetItem(t, n++, _PyUnicode_FromString(ev_name));
PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_name));
PyTuple_SetItem(t, n++, _PyLong_FromLong(value));
PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_str));
try_call_object(handler_name, t);
Py_DECREF(t);
}
static void define_values(enum tep_print_arg_type field_type,
struct tep_print_flag_sym *field,
const char *ev_name,
const char *field_name)
{
define_value(field_type, ev_name, field_name, field->value,
field->str);
if (field->next)
define_values(field_type, field->next, ev_name, field_name);
}
static void define_field(enum tep_print_arg_type field_type,
const char *ev_name,
const char *field_name,
const char *delim)
{
const char *handler_name = "define_flag_field";
PyObject *t;
unsigned n = 0;
if (field_type == TEP_PRINT_SYMBOL)
handler_name = "define_symbolic_field";
if (field_type == TEP_PRINT_FLAGS)
t = PyTuple_New(3);
else
t = PyTuple_New(2);
if (!t)
Py_FatalError("couldn't create Python tuple");
PyTuple_SetItem(t, n++, _PyUnicode_FromString(ev_name));
PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_name));
if (field_type == TEP_PRINT_FLAGS)
PyTuple_SetItem(t, n++, _PyUnicode_FromString(delim));
try_call_object(handler_name, t);
Py_DECREF(t);
}
static void define_event_symbols(struct tep_event *event,
const char *ev_name,
struct tep_print_arg *args)
{
if (args == NULL)
return;
switch (args->type) {
case TEP_PRINT_NULL:
break;
case TEP_PRINT_ATOM:
define_value(TEP_PRINT_FLAGS, ev_name, cur_field_name, "0",
args->atom.atom);
zero_flag_atom = 0;
break;
case TEP_PRINT_FIELD:
free(cur_field_name);
cur_field_name = strdup(args->field.name);
break;
case TEP_PRINT_FLAGS:
define_event_symbols(event, ev_name, args->flags.field);
define_field(TEP_PRINT_FLAGS, ev_name, cur_field_name,
args->flags.delim);
define_values(TEP_PRINT_FLAGS, args->flags.flags, ev_name,
cur_field_name);
break;
case TEP_PRINT_SYMBOL:
define_event_symbols(event, ev_name, args->symbol.field);
define_field(TEP_PRINT_SYMBOL, ev_name, cur_field_name, NULL);
define_values(TEP_PRINT_SYMBOL, args->symbol.symbols, ev_name,
cur_field_name);
break;
case TEP_PRINT_HEX:
case TEP_PRINT_HEX_STR:
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
case TEP_PRINT_INT_ARRAY:
define_event_symbols(event, ev_name, args->int_array.field);
define_event_symbols(event, ev_name, args->int_array.count);
define_event_symbols(event, ev_name, args->int_array.el_size);
break;
case TEP_PRINT_STRING:
break;
case TEP_PRINT_TYPE:
define_event_symbols(event, ev_name, args->typecast.item);
break;
case TEP_PRINT_OP:
if (strcmp(args->op.op, ":") == 0)
zero_flag_atom = 1;
define_event_symbols(event, ev_name, args->op.left);
define_event_symbols(event, ev_name, args->op.right);
break;
default:
/* gcc warns for these? */
case TEP_PRINT_BSTRING:
case TEP_PRINT_DYNAMIC_ARRAY:
case TEP_PRINT_DYNAMIC_ARRAY_LEN:
case TEP_PRINT_FUNC:
case TEP_PRINT_BITMASK:
/* we should warn... */
return;
}
if (args->next)
define_event_symbols(event, ev_name, args->next);
}
static PyObject *get_field_numeric_entry(struct tep_event *event,
struct tep_format_field *field, void *data)
{
bool is_array = field->flags & TEP_FIELD_IS_ARRAY;
PyObject *obj = NULL, *list = NULL;
unsigned long long val;
unsigned int item_size, n_items, i;
if (is_array) {
list = PyList_New(field->arraylen);
item_size = field->size / field->arraylen;
n_items = field->arraylen;
} else {
item_size = field->size;
n_items = 1;
}
for (i = 0; i < n_items; i++) {
val = read_size(event, data + field->offset + i * item_size,
item_size);
if (field->flags & TEP_FIELD_IS_SIGNED) {
if ((long long)val >= LONG_MIN &&
(long long)val <= LONG_MAX)
obj = _PyLong_FromLong(val);
else
obj = PyLong_FromLongLong(val);
} else {
if (val <= LONG_MAX)
obj = _PyLong_FromLong(val);
else
obj = PyLong_FromUnsignedLongLong(val);
}
if (is_array)
PyList_SET_ITEM(list, i, obj);
}
if (is_array)
obj = list;
return obj;
}
#endif
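/*
 * Resolve a map to a printable DSO name, preferring the full path when
 * symbol_conf.show_kernel_path is set; "[unknown]" when there is no DSO.
 */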
static const char *get_dsoname(struct map *map)
{
const char *dsoname = "[unknown]";
struct dso *dso = map ? map__dso(map) : NULL;
if (dso) {
if (symbol_conf.show_kernel_path && dso->long_name)
dsoname = dso->long_name;
else
dsoname = dso->name;
}
return dsoname;
}
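/* Offset of the resolved address from the start of the symbol it hit. */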
static unsigned long get_offset(struct symbol *sym, struct addr_location *al)
{
unsigned long offset;
if (al->addr < sym->end)
offset = al->addr - sym->start;
else
offset = al->addr - map__start(al->map) - sym->start;
return offset;
}
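/*
 * Resolve the sample's callchain and return it as a Python list of dicts,
 * one per frame, carrying the ip plus symbol and DSO details when known.
 */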
static PyObject *python_process_callchain(struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al)
{
PyObject *pylist;
struct callchain_cursor *cursor;
pylist = PyList_New(0);
if (!pylist)
Py_FatalError("couldn't create Python list");
if (!symbol_conf.use_callchain || !sample->callchain)
goto exit;
cursor = get_tls_callchain_cursor();
if (thread__resolve_callchain(al->thread, cursor, evsel,
sample, NULL, NULL,
scripting_max_stack) != 0) {
pr_err("Failed to resolve callchain. Skipping\n");
goto exit;
}
callchain_cursor_commit(cursor);
while (1) {
PyObject *pyelem;
struct callchain_cursor_node *node;
node = callchain_cursor_current(cursor);
if (!node)
break;
pyelem = PyDict_New();
if (!pyelem)
Py_FatalError("couldn't create Python dictionary");
pydict_set_item_string_decref(pyelem, "ip",
PyLong_FromUnsignedLongLong(node->ip));
if (node->ms.sym) {
PyObject *pysym = PyDict_New();
if (!pysym)
Py_FatalError("couldn't create Python dictionary");
pydict_set_item_string_decref(pysym, "start",
PyLong_FromUnsignedLongLong(node->ms.sym->start));
pydict_set_item_string_decref(pysym, "end",
PyLong_FromUnsignedLongLong(node->ms.sym->end));
pydict_set_item_string_decref(pysym, "binding",
_PyLong_FromLong(node->ms.sym->binding));
pydict_set_item_string_decref(pysym, "name",
_PyUnicode_FromStringAndSize(node->ms.sym->name,
node->ms.sym->namelen));
pydict_set_item_string_decref(pyelem, "sym", pysym);
if (node->ms.map) {
struct map *map = node->ms.map;
struct addr_location node_al;
unsigned long offset;
addr_location__init(&node_al);
node_al.addr = map__map_ip(map, node->ip);
node_al.map = map__get(map);
offset = get_offset(node->ms.sym, &node_al);
addr_location__exit(&node_al);
pydict_set_item_string_decref(
pyelem, "sym_off",
PyLong_FromUnsignedLongLong(offset));
}
if (node->srcline && strcmp(":0", node->srcline)) {
pydict_set_item_string_decref(
pyelem, "sym_srcline",
_PyUnicode_FromString(node->srcline));
}
}
if (node->ms.map) {
const char *dsoname = get_dsoname(node->ms.map);
pydict_set_item_string_decref(pyelem, "dso",
_PyUnicode_FromString(dsoname));
}
callchain_cursor_advance(cursor);
PyList_Append(pylist, pyelem);
Py_DECREF(pyelem);
}
exit:
return pylist;
}
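/*
 * Convert the raw branch stack into a Python list of dicts with from/to
 * addresses, branch flags and the DSO names they resolve to.
 */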
static PyObject *python_process_brstack(struct perf_sample *sample,
struct thread *thread)
{
struct branch_stack *br = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
PyObject *pylist;
u64 i;
pylist = PyList_New(0);
if (!pylist)
Py_FatalError("couldn't create Python list");
if (!(br && br->nr))
goto exit;
for (i = 0; i < br->nr; i++) {
PyObject *pyelem;
struct addr_location al;
const char *dsoname;
pyelem = PyDict_New();
if (!pyelem)
Py_FatalError("couldn't create Python dictionary");
pydict_set_item_string_decref(pyelem, "from",
PyLong_FromUnsignedLongLong(entries[i].from));
pydict_set_item_string_decref(pyelem, "to",
PyLong_FromUnsignedLongLong(entries[i].to));
pydict_set_item_string_decref(pyelem, "mispred",
PyBool_FromLong(entries[i].flags.mispred));
pydict_set_item_string_decref(pyelem, "predicted",
PyBool_FromLong(entries[i].flags.predicted));
pydict_set_item_string_decref(pyelem, "in_tx",
PyBool_FromLong(entries[i].flags.in_tx));
pydict_set_item_string_decref(pyelem, "abort",
PyBool_FromLong(entries[i].flags.abort));
pydict_set_item_string_decref(pyelem, "cycles",
PyLong_FromUnsignedLongLong(entries[i].flags.cycles));
addr_location__init(&al);
thread__find_map_fb(thread, sample->cpumode,
entries[i].from, &al);
dsoname = get_dsoname(al.map);
pydict_set_item_string_decref(pyelem, "from_dsoname",
_PyUnicode_FromString(dsoname));
thread__find_map_fb(thread, sample->cpumode,
entries[i].to, &al);
dsoname = get_dsoname(al.map);
pydict_set_item_string_decref(pyelem, "to_dsoname",
_PyUnicode_FromString(dsoname));
addr_location__exit(&al);
PyList_Append(pylist, pyelem);
Py_DECREF(pyelem);
}
exit:
return pylist;
}
static int get_symoff(struct symbol *sym, struct addr_location *al,
bool print_off, char *bf, int size)
{
unsigned long offset;
if (!sym || !sym->name[0])
return scnprintf(bf, size, "%s", "[unknown]");
if (!print_off)
return scnprintf(bf, size, "%s", sym->name);
offset = get_offset(sym, al);
return scnprintf(bf, size, "%s+0x%x", sym->name, offset);
}
static int get_br_mspred(struct branch_flags *flags, char *bf, int size)
{
if (!flags->mispred && !flags->predicted)
return scnprintf(bf, size, "%s", "-");
if (flags->mispred)
return scnprintf(bf, size, "%s", "M");
return scnprintf(bf, size, "%s", "P");
}
static PyObject *python_process_brstacksym(struct perf_sample *sample,
struct thread *thread)
{
struct branch_stack *br = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
PyObject *pylist;
u64 i;
char bf[512];
pylist = PyList_New(0);
if (!pylist)
Py_FatalError("couldn't create Python list");
if (!(br && br->nr))
goto exit;
for (i = 0; i < br->nr; i++) {
PyObject *pyelem;
struct addr_location al;
addr_location__init(&al);
pyelem = PyDict_New();
if (!pyelem)
Py_FatalError("couldn't create Python dictionary");
thread__find_symbol_fb(thread, sample->cpumode,
entries[i].from, &al);
get_symoff(al.sym, &al, true, bf, sizeof(bf));
pydict_set_item_string_decref(pyelem, "from",
_PyUnicode_FromString(bf));
thread__find_symbol_fb(thread, sample->cpumode,
entries[i].to, &al);
get_symoff(al.sym, &al, true, bf, sizeof(bf));
pydict_set_item_string_decref(pyelem, "to",
_PyUnicode_FromString(bf));
get_br_mspred(&entries[i].flags, bf, sizeof(bf));
pydict_set_item_string_decref(pyelem, "pred",
_PyUnicode_FromString(bf));
if (entries[i].flags.in_tx) {
pydict_set_item_string_decref(pyelem, "in_tx",
_PyUnicode_FromString("X"));
} else {
pydict_set_item_string_decref(pyelem, "in_tx",
_PyUnicode_FromString("-"));
}
if (entries[i].flags.abort) {
pydict_set_item_string_decref(pyelem, "abort",
_PyUnicode_FromString("A"));
} else {
pydict_set_item_string_decref(pyelem, "abort",
_PyUnicode_FromString("-"));
}
PyList_Append(pylist, pyelem);
Py_DECREF(pyelem);
addr_location__exit(&al);
}
exit:
return pylist;
}
static PyObject *get_sample_value_as_tuple(struct sample_read_value *value,
u64 read_format)
{
PyObject *t;
t = PyTuple_New(3);
if (!t)
Py_FatalError("couldn't create Python tuple");
PyTuple_SetItem(t, 0, PyLong_FromUnsignedLongLong(value->id));
PyTuple_SetItem(t, 1, PyLong_FromUnsignedLongLong(value->value));
if (read_format & PERF_FORMAT_LOST)
PyTuple_SetItem(t, 2, PyLong_FromUnsignedLongLong(value->lost));
return t;
}
static void set_sample_read_in_dict(PyObject *dict_sample,
struct perf_sample *sample,
struct evsel *evsel)
{
u64 read_format = evsel->core.attr.read_format;
PyObject *values;
unsigned int i;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
pydict_set_item_string_decref(dict_sample, "time_enabled",
PyLong_FromUnsignedLongLong(sample->read.time_enabled));
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
pydict_set_item_string_decref(dict_sample, "time_running",
PyLong_FromUnsignedLongLong(sample->read.time_running));
}
if (read_format & PERF_FORMAT_GROUP)
values = PyList_New(sample->read.group.nr);
else
values = PyList_New(1);
if (!values)
Py_FatalError("couldn't create Python list");
if (read_format & PERF_FORMAT_GROUP) {
struct sample_read_value *v = sample->read.group.values;
i = 0;
sample_read_group__for_each(v, sample->read.group.nr, read_format) {
PyObject *t = get_sample_value_as_tuple(v, read_format);
PyList_SET_ITEM(values, i, t);
i++;
}
} else {
PyObject *t = get_sample_value_as_tuple(&sample->read.one,
read_format);
PyList_SET_ITEM(values, 0, t);
}
pydict_set_item_string_decref(dict_sample, "values", values);
}
static void set_sample_datasrc_in_dict(PyObject *dict,
struct perf_sample *sample)
{
struct mem_info mi = { .data_src.val = sample->data_src };
char decode[100];
pydict_set_item_string_decref(dict, "datasrc",
PyLong_FromUnsignedLongLong(sample->data_src));
perf_script__meminfo_scnprintf(decode, 100, &mi);
pydict_set_item_string_decref(dict, "datasrc_decode",
_PyUnicode_FromString(decode));
}
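/*
 * Format the sampled register values selected by @mask into @bf as
 * "name:0xvalue" pairs, one per set bit, for display in the sample dict.
 */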
static void regs_map(struct regs_dump *regs, uint64_t mask, const char *arch, char *bf, int size)
{
unsigned int i = 0, r;
int printed = 0;
bf[0] = 0;
if (size <= 0)
return;
if (!regs || !regs->regs)
return;
for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs->regs[i++];
printed += scnprintf(bf + printed, size - printed,
"%5s:0x%" PRIx64 " ",
perf_reg_name(r, arch), val);
}
}
static void set_regs_in_dict(PyObject *dict,
struct perf_sample *sample,
struct evsel *evsel)
{
struct perf_event_attr *attr = &evsel->core.attr;
const char *arch = perf_env__arch(evsel__env(evsel));
/*
	 * The value 28 is the size reserved for printing one register value;
	 * it breaks down as:
	 * 16 chars for a 64-bit register value in hexadecimal,
	 * 2 chars for the "0x" prefix, and
	 * 10 chars for the register name.
*/
int size = __sw_hweight64(attr->sample_regs_intr) * 28;
	char *bf = malloc(size);
	/* Skip register output if the buffer could not be allocated. */
	if (!bf)
		return;
regs_map(&sample->intr_regs, attr->sample_regs_intr, arch, bf, size);
pydict_set_item_string_decref(dict, "iregs",
_PyUnicode_FromString(bf));
regs_map(&sample->user_regs, attr->sample_regs_user, arch, bf, size);
pydict_set_item_string_decref(dict, "uregs",
_PyUnicode_FromString(bf));
free(bf);
}
static void set_sym_in_dict(PyObject *dict, struct addr_location *al,
const char *dso_field, const char *dso_bid_field,
const char *dso_map_start, const char *dso_map_end,
const char *sym_field, const char *symoff_field)
{
char sbuild_id[SBUILD_ID_SIZE];
if (al->map) {
struct dso *dso = map__dso(al->map);
pydict_set_item_string_decref(dict, dso_field, _PyUnicode_FromString(dso->name));
build_id__sprintf(&dso->bid, sbuild_id);
pydict_set_item_string_decref(dict, dso_bid_field,
_PyUnicode_FromString(sbuild_id));
pydict_set_item_string_decref(dict, dso_map_start,
PyLong_FromUnsignedLong(map__start(al->map)));
pydict_set_item_string_decref(dict, dso_map_end,
PyLong_FromUnsignedLong(map__end(al->map)));
}
if (al->sym) {
pydict_set_item_string_decref(dict, sym_field,
_PyUnicode_FromString(al->sym->name));
pydict_set_item_string_decref(dict, symoff_field,
PyLong_FromUnsignedLong(get_offset(al->sym, al)));
}
}
static void set_sample_flags(PyObject *dict, u32 flags)
{
const char *ch = PERF_IP_FLAG_CHARS;
char *p, str[33];
for (p = str; *ch; ch++, flags >>= 1) {
if (flags & 1)
*p++ = *ch;
}
*p = 0;
pydict_set_item_string_decref(dict, "flags", _PyUnicode_FromString(str));
}
static void python_process_sample_flags(struct perf_sample *sample, PyObject *dict_sample)
{
char flags_disp[SAMPLE_FLAGS_BUF_SIZE];
set_sample_flags(dict_sample, sample->flags);
perf_sample__sprintf_flags(sample->flags, flags_disp, sizeof(flags_disp));
pydict_set_item_string_decref(dict_sample, "flags_disp",
_PyUnicode_FromString(flags_disp));
}
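/*
 * Build the dict handed to Python handlers: event attributes, per-sample
 * fields, resolved symbols, branch stacks, callchain and sampled registers.
 */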
static PyObject *get_perf_sample_dict(struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al,
struct addr_location *addr_al,
PyObject *callchain)
{
PyObject *dict, *dict_sample, *brstack, *brstacksym;
dict = PyDict_New();
if (!dict)
Py_FatalError("couldn't create Python dictionary");
dict_sample = PyDict_New();
if (!dict_sample)
Py_FatalError("couldn't create Python dictionary");
pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(evsel__name(evsel)));
pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->core.attr, sizeof(evsel->core.attr)));
pydict_set_item_string_decref(dict_sample, "pid",
_PyLong_FromLong(sample->pid));
pydict_set_item_string_decref(dict_sample, "tid",
_PyLong_FromLong(sample->tid));
pydict_set_item_string_decref(dict_sample, "cpu",
_PyLong_FromLong(sample->cpu));
pydict_set_item_string_decref(dict_sample, "ip",
PyLong_FromUnsignedLongLong(sample->ip));
pydict_set_item_string_decref(dict_sample, "time",
PyLong_FromUnsignedLongLong(sample->time));
pydict_set_item_string_decref(dict_sample, "period",
PyLong_FromUnsignedLongLong(sample->period));
pydict_set_item_string_decref(dict_sample, "phys_addr",
PyLong_FromUnsignedLongLong(sample->phys_addr));
pydict_set_item_string_decref(dict_sample, "addr",
PyLong_FromUnsignedLongLong(sample->addr));
set_sample_read_in_dict(dict_sample, sample, evsel);
pydict_set_item_string_decref(dict_sample, "weight",
PyLong_FromUnsignedLongLong(sample->weight));
pydict_set_item_string_decref(dict_sample, "transaction",
PyLong_FromUnsignedLongLong(sample->transaction));
set_sample_datasrc_in_dict(dict_sample, sample);
pydict_set_item_string_decref(dict, "sample", dict_sample);
pydict_set_item_string_decref(dict, "raw_buf", _PyBytes_FromStringAndSize(
(const char *)sample->raw_data, sample->raw_size));
pydict_set_item_string_decref(dict, "comm",
_PyUnicode_FromString(thread__comm_str(al->thread)));
set_sym_in_dict(dict, al, "dso", "dso_bid", "dso_map_start", "dso_map_end",
"symbol", "symoff");
pydict_set_item_string_decref(dict, "callchain", callchain);
brstack = python_process_brstack(sample, al->thread);
pydict_set_item_string_decref(dict, "brstack", brstack);
brstacksym = python_process_brstacksym(sample, al->thread);
pydict_set_item_string_decref(dict, "brstacksym", brstacksym);
if (sample->machine_pid) {
pydict_set_item_string_decref(dict_sample, "machine_pid",
_PyLong_FromLong(sample->machine_pid));
pydict_set_item_string_decref(dict_sample, "vcpu",
_PyLong_FromLong(sample->vcpu));
}
pydict_set_item_string_decref(dict_sample, "cpumode",
_PyLong_FromLong((unsigned long)sample->cpumode));
if (addr_al) {
pydict_set_item_string_decref(dict_sample, "addr_correlates_sym",
PyBool_FromLong(1));
set_sym_in_dict(dict_sample, addr_al, "addr_dso", "addr_dso_bid",
"addr_dso_map_start", "addr_dso_map_end",
"addr_symbol", "addr_symoff");
}
if (sample->flags)
python_process_sample_flags(sample, dict_sample);
/* Instructions per cycle (IPC) */
if (sample->insn_cnt && sample->cyc_cnt) {
pydict_set_item_string_decref(dict_sample, "insn_cnt",
PyLong_FromUnsignedLongLong(sample->insn_cnt));
pydict_set_item_string_decref(dict_sample, "cyc_cnt",
PyLong_FromUnsignedLongLong(sample->cyc_cnt));
}
set_regs_in_dict(dict, sample, evsel);
return dict;
}
#ifdef HAVE_LIBTRACEEVENT
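/*
 * Dispatch a tracepoint sample to its <system>__<event> handler, falling
 * back to trace_unhandled() with a dict of fields when no handler exists.
 */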
static void python_process_tracepoint(struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al,
struct addr_location *addr_al)
{
struct tep_event *event = evsel->tp_format;
PyObject *handler, *context, *t, *obj = NULL, *callchain;
PyObject *dict = NULL, *all_entries_dict = NULL;
static char handler_name[256];
struct tep_format_field *field;
unsigned long s, ns;
unsigned n = 0;
int pid;
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
const char *comm = thread__comm_str(al->thread);
const char *default_handler_name = "trace_unhandled";
DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX);
bitmap_zero(events_defined, TRACE_EVENT_TYPE_MAX);
if (!event) {
snprintf(handler_name, sizeof(handler_name),
"ug! no event found for type %" PRIu64, (u64)evsel->core.attr.config);
Py_FatalError(handler_name);
}
pid = raw_field_value(event, "common_pid", data);
sprintf(handler_name, "%s__%s", event->system, event->name);
if (!__test_and_set_bit(event->id, events_defined))
define_event_symbols(event, handler_name, event->print_fmt.args);
handler = get_handler(handler_name);
if (!handler) {
handler = get_handler(default_handler_name);
if (!handler)
return;
dict = PyDict_New();
if (!dict)
Py_FatalError("couldn't create Python dict");
}
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
s = nsecs / NSEC_PER_SEC;
ns = nsecs - s * NSEC_PER_SEC;
context = _PyCapsule_New(scripting_context, NULL, NULL);
PyTuple_SetItem(t, n++, _PyUnicode_FromString(handler_name));
PyTuple_SetItem(t, n++, context);
/* ip unwinding */
callchain = python_process_callchain(sample, evsel, al);
/* Need an additional reference for the perf_sample dict */
Py_INCREF(callchain);
if (!dict) {
PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu));
PyTuple_SetItem(t, n++, _PyLong_FromLong(s));
PyTuple_SetItem(t, n++, _PyLong_FromLong(ns));
PyTuple_SetItem(t, n++, _PyLong_FromLong(pid));
PyTuple_SetItem(t, n++, _PyUnicode_FromString(comm));
PyTuple_SetItem(t, n++, callchain);
} else {
pydict_set_item_string_decref(dict, "common_cpu", _PyLong_FromLong(cpu));
pydict_set_item_string_decref(dict, "common_s", _PyLong_FromLong(s));
pydict_set_item_string_decref(dict, "common_ns", _PyLong_FromLong(ns));
pydict_set_item_string_decref(dict, "common_pid", _PyLong_FromLong(pid));
pydict_set_item_string_decref(dict, "common_comm", _PyUnicode_FromString(comm));
pydict_set_item_string_decref(dict, "common_callchain", callchain);
}
for (field = event->format.fields; field; field = field->next) {
unsigned int offset, len;
unsigned long long val;
if (field->flags & TEP_FIELD_IS_ARRAY) {
offset = field->offset;
len = field->size;
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
val = tep_read_number(scripting_context->pevent,
data + offset, len);
offset = val;
len = offset >> 16;
offset &= 0xffff;
if (tep_field_is_relative(field->flags))
offset += field->offset + field->size;
}
if (field->flags & TEP_FIELD_IS_STRING &&
is_printable_array(data + offset, len)) {
obj = _PyUnicode_FromString((char *) data + offset);
} else {
obj = PyByteArray_FromStringAndSize((const char *) data + offset, len);
field->flags &= ~TEP_FIELD_IS_STRING;
}
} else { /* FIELD_IS_NUMERIC */
obj = get_field_numeric_entry(event, field, data);
}
if (!dict)
PyTuple_SetItem(t, n++, obj);
else
pydict_set_item_string_decref(dict, field->name, obj);
}
if (dict)
PyTuple_SetItem(t, n++, dict);
if (get_argument_count(handler) == (int) n + 1) {
all_entries_dict = get_perf_sample_dict(sample, evsel, al, addr_al,
callchain);
PyTuple_SetItem(t, n++, all_entries_dict);
} else {
Py_DECREF(callchain);
}
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
if (!dict)
call_object(handler, t, handler_name);
else
call_object(handler, t, default_handler_name);
Py_DECREF(t);
}
#else
static void python_process_tracepoint(struct perf_sample *sample __maybe_unused,
struct evsel *evsel __maybe_unused,
struct addr_location *al __maybe_unused,
struct addr_location *addr_al __maybe_unused)
{
fprintf(stderr, "Tracepoint events are not supported because "
"perf is not linked with libtraceevent.\n");
}
#endif
static PyObject *tuple_new(unsigned int sz)
{
PyObject *t;
t = PyTuple_New(sz);
if (!t)
Py_FatalError("couldn't create Python tuple");
return t;
}
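/* Helpers to populate the tuples handed to the db export Python handlers. */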
static int tuple_set_s64(PyObject *t, unsigned int pos, s64 val)
{
#if BITS_PER_LONG == 64
return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
#endif
#if BITS_PER_LONG == 32
return PyTuple_SetItem(t, pos, PyLong_FromLongLong(val));
#endif
}
/*
* Databases support only signed 64-bit numbers, so even though we are
* exporting a u64, it must be as s64.
*/
#define tuple_set_d64 tuple_set_s64
static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
{
#if BITS_PER_LONG == 64
return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLong(val));
#endif
#if BITS_PER_LONG == 32
return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLongLong(val));
#endif
}
static int tuple_set_u32(PyObject *t, unsigned int pos, u32 val)
{
return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLong(val));
}
static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val)
{
return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
}
static int tuple_set_bool(PyObject *t, unsigned int pos, bool val)
{
return PyTuple_SetItem(t, pos, PyBool_FromLong(val));
}
static int tuple_set_string(PyObject *t, unsigned int pos, const char *s)
{
return PyTuple_SetItem(t, pos, _PyUnicode_FromString(s));
}
static int tuple_set_bytes(PyObject *t, unsigned int pos, void *bytes,
unsigned int sz)
{
return PyTuple_SetItem(t, pos, _PyBytes_FromStringAndSize(bytes, sz));
}
static int python_export_evsel(struct db_export *dbe, struct evsel *evsel)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(2);
tuple_set_d64(t, 0, evsel->db_id);
tuple_set_string(t, 1, evsel__name(evsel));
call_object(tables->evsel_handler, t, "evsel_table");
Py_DECREF(t);
return 0;
}
static int python_export_machine(struct db_export *dbe,
struct machine *machine)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(3);
tuple_set_d64(t, 0, machine->db_id);
tuple_set_s32(t, 1, machine->pid);
tuple_set_string(t, 2, machine->root_dir ? machine->root_dir : "");
call_object(tables->machine_handler, t, "machine_table");
Py_DECREF(t);
return 0;
}
static int python_export_thread(struct db_export *dbe, struct thread *thread,
u64 main_thread_db_id, struct machine *machine)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(5);
tuple_set_d64(t, 0, thread__db_id(thread));
tuple_set_d64(t, 1, machine->db_id);
tuple_set_d64(t, 2, main_thread_db_id);
tuple_set_s32(t, 3, thread__pid(thread));
tuple_set_s32(t, 4, thread__tid(thread));
call_object(tables->thread_handler, t, "thread_table");
Py_DECREF(t);
return 0;
}
static int python_export_comm(struct db_export *dbe, struct comm *comm,
struct thread *thread)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(5);
tuple_set_d64(t, 0, comm->db_id);
tuple_set_string(t, 1, comm__str(comm));
tuple_set_d64(t, 2, thread__db_id(thread));
tuple_set_d64(t, 3, comm->start);
tuple_set_s32(t, 4, comm->exec);
call_object(tables->comm_handler, t, "comm_table");
Py_DECREF(t);
return 0;
}
static int python_export_comm_thread(struct db_export *dbe, u64 db_id,
struct comm *comm, struct thread *thread)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(3);
tuple_set_d64(t, 0, db_id);
tuple_set_d64(t, 1, comm->db_id);
tuple_set_d64(t, 2, thread__db_id(thread));
call_object(tables->comm_thread_handler, t, "comm_thread_table");
Py_DECREF(t);
return 0;
}
static int python_export_dso(struct db_export *dbe, struct dso *dso,
struct machine *machine)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
char sbuild_id[SBUILD_ID_SIZE];
PyObject *t;
build_id__sprintf(&dso->bid, sbuild_id);
t = tuple_new(5);
tuple_set_d64(t, 0, dso->db_id);
tuple_set_d64(t, 1, machine->db_id);
tuple_set_string(t, 2, dso->short_name);
tuple_set_string(t, 3, dso->long_name);
tuple_set_string(t, 4, sbuild_id);
call_object(tables->dso_handler, t, "dso_table");
Py_DECREF(t);
return 0;
}
static int python_export_symbol(struct db_export *dbe, struct symbol *sym,
struct dso *dso)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
u64 *sym_db_id = symbol__priv(sym);
PyObject *t;
t = tuple_new(6);
tuple_set_d64(t, 0, *sym_db_id);
tuple_set_d64(t, 1, dso->db_id);
tuple_set_d64(t, 2, sym->start);
tuple_set_d64(t, 3, sym->end);
tuple_set_s32(t, 4, sym->binding);
tuple_set_string(t, 5, sym->name);
call_object(tables->symbol_handler, t, "symbol_table");
Py_DECREF(t);
return 0;
}
static int python_export_branch_type(struct db_export *dbe, u32 branch_type,
const char *name)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(2);
tuple_set_s32(t, 0, branch_type);
tuple_set_string(t, 1, name);
call_object(tables->branch_type_handler, t, "branch_type_table");
Py_DECREF(t);
return 0;
}
static void python_export_sample_table(struct db_export *dbe,
struct export_sample *es)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(25);
tuple_set_d64(t, 0, es->db_id);
tuple_set_d64(t, 1, es->evsel->db_id);
tuple_set_d64(t, 2, maps__machine(es->al->maps)->db_id);
tuple_set_d64(t, 3, thread__db_id(es->al->thread));
tuple_set_d64(t, 4, es->comm_db_id);
tuple_set_d64(t, 5, es->dso_db_id);
tuple_set_d64(t, 6, es->sym_db_id);
tuple_set_d64(t, 7, es->offset);
tuple_set_d64(t, 8, es->sample->ip);
tuple_set_d64(t, 9, es->sample->time);
tuple_set_s32(t, 10, es->sample->cpu);
tuple_set_d64(t, 11, es->addr_dso_db_id);
tuple_set_d64(t, 12, es->addr_sym_db_id);
tuple_set_d64(t, 13, es->addr_offset);
tuple_set_d64(t, 14, es->sample->addr);
tuple_set_d64(t, 15, es->sample->period);
tuple_set_d64(t, 16, es->sample->weight);
tuple_set_d64(t, 17, es->sample->transaction);
tuple_set_d64(t, 18, es->sample->data_src);
tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
tuple_set_d64(t, 21, es->call_path_id);
tuple_set_d64(t, 22, es->sample->insn_cnt);
tuple_set_d64(t, 23, es->sample->cyc_cnt);
tuple_set_s32(t, 24, es->sample->flags);
call_object(tables->sample_handler, t, "sample_table");
Py_DECREF(t);
}
static void python_export_synth(struct db_export *dbe, struct export_sample *es)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(3);
tuple_set_d64(t, 0, es->db_id);
tuple_set_d64(t, 1, es->evsel->core.attr.config);
tuple_set_bytes(t, 2, es->sample->raw_data, es->sample->raw_size);
call_object(tables->synth_handler, t, "synth_data");
Py_DECREF(t);
}
static int python_export_sample(struct db_export *dbe,
struct export_sample *es)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
python_export_sample_table(dbe, es);
if (es->evsel->core.attr.type == PERF_TYPE_SYNTH && tables->synth_handler)
python_export_synth(dbe, es);
return 0;
}
static int python_export_call_path(struct db_export *dbe, struct call_path *cp)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
u64 parent_db_id, sym_db_id;
parent_db_id = cp->parent ? cp->parent->db_id : 0;
sym_db_id = cp->sym ? *(u64 *)symbol__priv(cp->sym) : 0;
t = tuple_new(4);
tuple_set_d64(t, 0, cp->db_id);
tuple_set_d64(t, 1, parent_db_id);
tuple_set_d64(t, 2, sym_db_id);
tuple_set_d64(t, 3, cp->ip);
call_object(tables->call_path_handler, t, "call_path_table");
Py_DECREF(t);
return 0;
}
static int python_export_call_return(struct db_export *dbe,
struct call_return *cr)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
u64 comm_db_id = cr->comm ? cr->comm->db_id : 0;
PyObject *t;
t = tuple_new(14);
tuple_set_d64(t, 0, cr->db_id);
tuple_set_d64(t, 1, thread__db_id(cr->thread));
tuple_set_d64(t, 2, comm_db_id);
tuple_set_d64(t, 3, cr->cp->db_id);
tuple_set_d64(t, 4, cr->call_time);
tuple_set_d64(t, 5, cr->return_time);
tuple_set_d64(t, 6, cr->branch_count);
tuple_set_d64(t, 7, cr->call_ref);
tuple_set_d64(t, 8, cr->return_ref);
tuple_set_d64(t, 9, cr->cp->parent->db_id);
tuple_set_s32(t, 10, cr->flags);
tuple_set_d64(t, 11, cr->parent_db_id);
tuple_set_d64(t, 12, cr->insn_count);
tuple_set_d64(t, 13, cr->cyc_count);
call_object(tables->call_return_handler, t, "call_return_table");
Py_DECREF(t);
return 0;
}
static int python_export_context_switch(struct db_export *dbe, u64 db_id,
struct machine *machine,
struct perf_sample *sample,
u64 th_out_id, u64 comm_out_id,
u64 th_in_id, u64 comm_in_id, int flags)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(9);
tuple_set_d64(t, 0, db_id);
tuple_set_d64(t, 1, machine->db_id);
tuple_set_d64(t, 2, sample->time);
tuple_set_s32(t, 3, sample->cpu);
tuple_set_d64(t, 4, th_out_id);
tuple_set_d64(t, 5, comm_out_id);
tuple_set_d64(t, 6, th_in_id);
tuple_set_d64(t, 7, comm_in_id);
tuple_set_s32(t, 8, flags);
call_object(tables->context_switch_handler, t, "context_switch");
Py_DECREF(t);
return 0;
}
static int python_process_call_return(struct call_return *cr, u64 *parent_db_id,
void *data)
{
struct db_export *dbe = data;
return db_export__call_return(dbe, cr, parent_db_id);
}
static void python_process_general_event(struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al,
struct addr_location *addr_al)
{
PyObject *handler, *t, *dict, *callchain;
static char handler_name[64];
unsigned n = 0;
snprintf(handler_name, sizeof(handler_name), "%s", "process_event");
handler = get_handler(handler_name);
if (!handler)
return;
/*
	 * Use MAX_FIELDS so the tuple can grow in the future, even though
	 * it currently carries only a single item.
*/
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
/* ip unwinding */
callchain = python_process_callchain(sample, evsel, al);
dict = get_perf_sample_dict(sample, evsel, al, addr_al, callchain);
PyTuple_SetItem(t, n++, dict);
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
call_object(handler, t, handler_name);
Py_DECREF(t);
}
static void python_process_event(union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al,
struct addr_location *addr_al)
{
struct tables *tables = &tables_global;
scripting_context__update(scripting_context, event, sample, evsel, al, addr_al);
switch (evsel->core.attr.type) {
case PERF_TYPE_TRACEPOINT:
python_process_tracepoint(sample, evsel, al, addr_al);
break;
/* Reserve for future process_hw/sw/raw APIs */
default:
if (tables->db_export_mode)
db_export__sample(&tables->dbe, event, sample, evsel, al, addr_al);
else
python_process_general_event(sample, evsel, al, addr_al);
}
}
static void python_process_throttle(union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
const char *handler_name;
PyObject *handler, *t;
if (event->header.type == PERF_RECORD_THROTTLE)
handler_name = "throttle";
else
handler_name = "unthrottle";
handler = get_handler(handler_name);
if (!handler)
return;
t = tuple_new(6);
if (!t)
return;
tuple_set_u64(t, 0, event->throttle.time);
tuple_set_u64(t, 1, event->throttle.id);
tuple_set_u64(t, 2, event->throttle.stream_id);
tuple_set_s32(t, 3, sample->cpu);
tuple_set_s32(t, 4, sample->pid);
tuple_set_s32(t, 5, sample->tid);
call_object(handler, t, handler_name);
Py_DECREF(t);
}
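/* Forward a context switch record to the script's context_switch() handler. */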
static void python_do_process_switch(union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
const char *handler_name = "context_switch";
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
bool out_preempt = out && (event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT);
pid_t np_pid = -1, np_tid = -1;
PyObject *handler, *t;
handler = get_handler(handler_name);
if (!handler)
return;
if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
np_pid = event->context_switch.next_prev_pid;
np_tid = event->context_switch.next_prev_tid;
}
t = tuple_new(11);
if (!t)
return;
tuple_set_u64(t, 0, sample->time);
tuple_set_s32(t, 1, sample->cpu);
tuple_set_s32(t, 2, sample->pid);
tuple_set_s32(t, 3, sample->tid);
tuple_set_s32(t, 4, np_pid);
tuple_set_s32(t, 5, np_tid);
tuple_set_s32(t, 6, machine->pid);
tuple_set_bool(t, 7, out);
tuple_set_bool(t, 8, out_preempt);
tuple_set_s32(t, 9, sample->machine_pid);
tuple_set_s32(t, 10, sample->vcpu);
call_object(handler, t, handler_name);
Py_DECREF(t);
}
static void python_process_switch(union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct tables *tables = &tables_global;
if (tables->db_export_mode)
db_export__switch(&tables->dbe, event, sample, machine);
else
python_do_process_switch(event, sample, machine);
}
static void python_process_auxtrace_error(struct perf_session *session __maybe_unused,
union perf_event *event)
{
struct perf_record_auxtrace_error *e = &event->auxtrace_error;
u8 cpumode = e->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
const char *handler_name = "auxtrace_error";
unsigned long long tm = e->time;
const char *msg = e->msg;
PyObject *handler, *t;
handler = get_handler(handler_name);
if (!handler)
return;
if (!e->fmt) {
tm = 0;
msg = (const char *)&e->time;
}
t = tuple_new(11);
tuple_set_u32(t, 0, e->type);
tuple_set_u32(t, 1, e->code);
tuple_set_s32(t, 2, e->cpu);
tuple_set_s32(t, 3, e->pid);
tuple_set_s32(t, 4, e->tid);
tuple_set_u64(t, 5, e->ip);
tuple_set_u64(t, 6, tm);
tuple_set_string(t, 7, msg);
tuple_set_u32(t, 8, cpumode);
tuple_set_s32(t, 9, e->machine_pid);
tuple_set_s32(t, 10, e->vcpu);
call_object(handler, t, handler_name);
Py_DECREF(t);
}
static void get_handler_name(char *str, size_t size,
struct evsel *evsel)
{
char *p = str;
scnprintf(str, size, "stat__%s", evsel__name(evsel));
while ((p = strchr(p, ':'))) {
*p = '_';
p++;
}
}
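/*
 * Deliver one counter value to the per-event stat__<event> handler,
 * identifying the CPU, thread and timestamp it belongs to.
 */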
static void
process_stat(struct evsel *counter, struct perf_cpu cpu, int thread, u64 tstamp,
struct perf_counts_values *count)
{
PyObject *handler, *t;
static char handler_name[256];
int n = 0;
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
get_handler_name(handler_name, sizeof(handler_name),
counter);
handler = get_handler(handler_name);
if (!handler) {
pr_debug("can't find python handler %s\n", handler_name);
return;
}
PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu.cpu));
PyTuple_SetItem(t, n++, _PyLong_FromLong(thread));
tuple_set_u64(t, n++, tstamp);
tuple_set_u64(t, n++, count->val);
tuple_set_u64(t, n++, count->ena);
tuple_set_u64(t, n++, count->run);
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
call_object(handler, t, handler_name);
Py_DECREF(t);
}
static void python_process_stat(struct perf_stat_config *config,
struct evsel *counter, u64 tstamp)
{
struct perf_thread_map *threads = counter->core.threads;
struct perf_cpu_map *cpus = counter->core.cpus;
int cpu, thread;
for (thread = 0; thread < perf_thread_map__nr(threads); thread++) {
for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
process_stat(counter, perf_cpu_map__cpu(cpus, cpu),
perf_thread_map__pid(threads, thread), tstamp,
perf_counts(counter->counts, cpu, thread));
}
}
}
static void python_process_stat_interval(u64 tstamp)
{
PyObject *handler, *t;
static const char handler_name[] = "stat__interval";
int n = 0;
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
handler = get_handler(handler_name);
if (!handler) {
pr_debug("can't find python handler %s\n", handler_name);
return;
}
tuple_set_u64(t, n++, tstamp);
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
call_object(handler, t, handler_name);
Py_DECREF(t);
}
static int perf_script_context_init(void)
{
PyObject *perf_script_context;
PyObject *perf_trace_context;
PyObject *dict;
int ret;
perf_trace_context = PyImport_AddModule("perf_trace_context");
if (!perf_trace_context)
return -1;
dict = PyModule_GetDict(perf_trace_context);
if (!dict)
return -1;
perf_script_context = _PyCapsule_New(scripting_context, NULL, NULL);
if (!perf_script_context)
return -1;
ret = PyDict_SetItemString(dict, "perf_script_context", perf_script_context);
if (!ret)
ret = PyDict_SetItemString(main_dict, "perf_script_context", perf_script_context);
Py_DECREF(perf_script_context);
return ret;
}
static int run_start_sub(void)
{
main_module = PyImport_AddModule("__main__");
if (main_module == NULL)
return -1;
Py_INCREF(main_module);
main_dict = PyModule_GetDict(main_module);
if (main_dict == NULL)
goto error;
Py_INCREF(main_dict);
if (perf_script_context_init())
goto error;
try_call_object("trace_begin", NULL);
return 0;
error:
Py_XDECREF(main_dict);
Py_XDECREF(main_module);
return -1;
}
#define SET_TABLE_HANDLER_(name, handler_name, table_name) do { \
tables->handler_name = get_handler(#table_name); \
if (tables->handler_name) \
tables->dbe.export_ ## name = python_export_ ## name; \
} while (0)
#define SET_TABLE_HANDLER(name) \
SET_TABLE_HANDLER_(name, name ## _handler, name ## _table)
static void set_table_handlers(struct tables *tables)
{
const char *perf_db_export_mode = "perf_db_export_mode";
const char *perf_db_export_calls = "perf_db_export_calls";
const char *perf_db_export_callchains = "perf_db_export_callchains";
PyObject *db_export_mode, *db_export_calls, *db_export_callchains;
bool export_calls = false;
bool export_callchains = false;
int ret;
memset(tables, 0, sizeof(struct tables));
if (db_export__init(&tables->dbe))
Py_FatalError("failed to initialize export");
db_export_mode = PyDict_GetItemString(main_dict, perf_db_export_mode);
if (!db_export_mode)
return;
ret = PyObject_IsTrue(db_export_mode);
if (ret == -1)
handler_call_die(perf_db_export_mode);
if (!ret)
return;
/* handle export calls */
tables->dbe.crp = NULL;
db_export_calls = PyDict_GetItemString(main_dict, perf_db_export_calls);
if (db_export_calls) {
ret = PyObject_IsTrue(db_export_calls);
if (ret == -1)
handler_call_die(perf_db_export_calls);
export_calls = !!ret;
}
if (export_calls) {
tables->dbe.crp =
call_return_processor__new(python_process_call_return,
&tables->dbe);
if (!tables->dbe.crp)
Py_FatalError("failed to create calls processor");
}
/* handle export callchains */
tables->dbe.cpr = NULL;
db_export_callchains = PyDict_GetItemString(main_dict,
perf_db_export_callchains);
if (db_export_callchains) {
ret = PyObject_IsTrue(db_export_callchains);
if (ret == -1)
handler_call_die(perf_db_export_callchains);
export_callchains = !!ret;
}
if (export_callchains) {
/*
* Attempt to use the call path root from the call return
* processor, if the call return processor is in use. Otherwise,
* we allocate a new call path root. This prevents exporting
* duplicate call path ids when both are in use simultaneously.
*/
if (tables->dbe.crp)
tables->dbe.cpr = tables->dbe.crp->cpr;
else
tables->dbe.cpr = call_path_root__new();
if (!tables->dbe.cpr)
Py_FatalError("failed to create call path root");
}
tables->db_export_mode = true;
/*
* Reserve per symbol space for symbol->db_id via symbol__priv()
*/
symbol_conf.priv_size = sizeof(u64);
SET_TABLE_HANDLER(evsel);
SET_TABLE_HANDLER(machine);
SET_TABLE_HANDLER(thread);
SET_TABLE_HANDLER(comm);
SET_TABLE_HANDLER(comm_thread);
SET_TABLE_HANDLER(dso);
SET_TABLE_HANDLER(symbol);
SET_TABLE_HANDLER(branch_type);
SET_TABLE_HANDLER(sample);
SET_TABLE_HANDLER(call_path);
SET_TABLE_HANDLER(call_return);
SET_TABLE_HANDLER(context_switch);
/*
* Synthesized events are samples but with architecture-specific data
* stored in sample->raw_data. They are exported via
* python_export_sample() and consequently do not need a separate export
* callback.
*/
tables->synth_handler = get_handler("synth_data");
}
#if PY_MAJOR_VERSION < 3
static void _free_command_line(const char **command_line, int num)
{
free(command_line);
}
#else
static void _free_command_line(wchar_t **command_line, int num)
{
int i;
for (i = 0; i < num; i++)
PyMem_RawFree(command_line[i]);
free(command_line);
}
#endif
/*
* Start trace script
*/
static int python_start_script(const char *script, int argc, const char **argv,
struct perf_session *session)
{
struct tables *tables = &tables_global;
#if PY_MAJOR_VERSION < 3
const char **command_line;
#else
wchar_t **command_line;
#endif
/*
* Use a non-const name variable to cope with python 2.6's
* PyImport_AppendInittab prototype
*/
char buf[PATH_MAX], name[19] = "perf_trace_context";
int i, err = 0;
FILE *fp;
scripting_context->session = session;
#if PY_MAJOR_VERSION < 3
command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script;
for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1];
PyImport_AppendInittab(name, initperf_trace_context);
#else
command_line = malloc((argc + 1) * sizeof(wchar_t *));
command_line[0] = Py_DecodeLocale(script, NULL);
for (i = 1; i < argc + 1; i++)
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
PyImport_AppendInittab(name, PyInit_perf_trace_context);
#endif
Py_Initialize();
#if PY_MAJOR_VERSION < 3
PySys_SetArgv(argc + 1, (char **)command_line);
#else
PySys_SetArgv(argc + 1, command_line);
#endif
fp = fopen(script, "r");
if (!fp) {
sprintf(buf, "Can't open python script \"%s\"", script);
perror(buf);
err = -1;
goto error;
}
err = PyRun_SimpleFile(fp, script);
if (err) {
fprintf(stderr, "Error running python script %s\n", script);
goto error;
}
err = run_start_sub();
if (err) {
fprintf(stderr, "Error starting python script %s\n", script);
goto error;
}
set_table_handlers(tables);
if (tables->db_export_mode) {
err = db_export__branch_types(&tables->dbe);
if (err)
goto error;
}
_free_command_line(command_line, argc + 1);
return err;
error:
Py_Finalize();
_free_command_line(command_line, argc + 1);
return err;
}
static int python_flush_script(void)
{
return 0;
}
/*
* Stop trace script
*/
static int python_stop_script(void)
{
struct tables *tables = &tables_global;
try_call_object("trace_end", NULL);
db_export__exit(&tables->dbe);
Py_XDECREF(main_dict);
Py_XDECREF(main_module);
Py_Finalize();
return 0;
}
#ifdef HAVE_LIBTRACEEVENT
static int python_generate_script(struct tep_handle *pevent, const char *outfile)
{
int i, not_first, count, nr_events;
struct tep_event **all_events;
struct tep_event *event = NULL;
struct tep_format_field *f;
char fname[PATH_MAX];
FILE *ofp;
sprintf(fname, "%s.py", outfile);
ofp = fopen(fname, "w");
if (ofp == NULL) {
fprintf(stderr, "couldn't open %s\n", fname);
return -1;
}
fprintf(ofp, "# perf script event handlers, "
"generated by perf script -g python\n");
fprintf(ofp, "# Licensed under the terms of the GNU GPL"
" License version 2\n\n");
fprintf(ofp, "# The common_* event handler fields are the most useful "
"fields common to\n");
fprintf(ofp, "# all events. They don't necessarily correspond to "
"the 'common_*' fields\n");
fprintf(ofp, "# in the format files. Those fields not available as "
"handler params can\n");
fprintf(ofp, "# be retrieved using Python functions of the form "
"common_*(context).\n");
fprintf(ofp, "# See the perf-script-python Documentation for the list "
"of available functions.\n\n");
fprintf(ofp, "from __future__ import print_function\n\n");
fprintf(ofp, "import os\n");
fprintf(ofp, "import sys\n\n");
fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n");
fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n");
fprintf(ofp, "\nfrom perf_trace_context import *\n");
fprintf(ofp, "from Core import *\n\n\n");
fprintf(ofp, "def trace_begin():\n");
fprintf(ofp, "\tprint(\"in trace_begin\")\n\n");
fprintf(ofp, "def trace_end():\n");
fprintf(ofp, "\tprint(\"in trace_end\")\n\n");
nr_events = tep_get_events_count(pevent);
all_events = tep_list_events(pevent, TEP_EVENT_SORT_ID);
for (i = 0; all_events && i < nr_events; i++) {
event = all_events[i];
fprintf(ofp, "def %s__%s(", event->system, event->name);
fprintf(ofp, "event_name, ");
fprintf(ofp, "context, ");
fprintf(ofp, "common_cpu,\n");
fprintf(ofp, "\tcommon_secs, ");
fprintf(ofp, "common_nsecs, ");
fprintf(ofp, "common_pid, ");
fprintf(ofp, "common_comm,\n\t");
fprintf(ofp, "common_callchain, ");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (++count % 5 == 0)
fprintf(ofp, "\n\t");
fprintf(ofp, "%s", f->name);
}
if (not_first++)
fprintf(ofp, ", ");
if (++count % 5 == 0)
fprintf(ofp, "\n\t\t");
fprintf(ofp, "perf_sample_dict");
fprintf(ofp, "):\n");
fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
"common_secs, common_nsecs,\n\t\t\t"
"common_pid, common_comm)\n\n");
fprintf(ofp, "\t\tprint(\"");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (count && count % 3 == 0) {
fprintf(ofp, "\" \\\n\t\t\"");
}
count++;
fprintf(ofp, "%s=", f->name);
if (f->flags & TEP_FIELD_IS_STRING ||
f->flags & TEP_FIELD_IS_FLAG ||
f->flags & TEP_FIELD_IS_ARRAY ||
f->flags & TEP_FIELD_IS_SYMBOLIC)
fprintf(ofp, "%%s");
else if (f->flags & TEP_FIELD_IS_SIGNED)
fprintf(ofp, "%%d");
else
fprintf(ofp, "%%u");
}
fprintf(ofp, "\" %% \\\n\t\t(");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (++count % 5 == 0)
fprintf(ofp, "\n\t\t");
if (f->flags & TEP_FIELD_IS_FLAG) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t\t");
count = 4;
}
fprintf(ofp, "flag_str(\"");
fprintf(ofp, "%s__%s\", ", event->system,
event->name);
fprintf(ofp, "\"%s\", %s)", f->name,
f->name);
} else if (f->flags & TEP_FIELD_IS_SYMBOLIC) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t\t");
count = 4;
}
fprintf(ofp, "symbol_str(\"");
fprintf(ofp, "%s__%s\", ", event->system,
event->name);
fprintf(ofp, "\"%s\", %s)", f->name,
f->name);
} else
fprintf(ofp, "%s", f->name);
}
fprintf(ofp, "))\n\n");
fprintf(ofp, "\t\tprint('Sample: {'+"
"get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n");
fprintf(ofp, "\t\tfor node in common_callchain:");
fprintf(ofp, "\n\t\t\tif 'sym' in node:");
fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x] %%s%%s%%s%%s\" %% (");
fprintf(ofp, "\n\t\t\t\t\tnode['ip'], node['sym']['name'],");
fprintf(ofp, "\n\t\t\t\t\t\"+0x{:x}\".format(node['sym_off']) if 'sym_off' in node else \"\",");
fprintf(ofp, "\n\t\t\t\t\t\" ({})\".format(node['dso']) if 'dso' in node else \"\",");
fprintf(ofp, "\n\t\t\t\t\t\" \" + node['sym_srcline'] if 'sym_srcline' in node else \"\"))");
fprintf(ofp, "\n\t\t\telse:");
fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x]\" %% (node['ip']))\n\n");
fprintf(ofp, "\t\tprint()\n\n");
}
fprintf(ofp, "def trace_unhandled(event_name, context, "
"event_fields_dict, perf_sample_dict):\n");
fprintf(ofp, "\t\tprint(get_dict_as_string(event_fields_dict))\n");
fprintf(ofp, "\t\tprint('Sample: {'+"
"get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n");
fprintf(ofp, "def print_header("
"event_name, cpu, secs, nsecs, pid, comm):\n"
"\tprint(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
"(event_name, cpu, secs, nsecs, pid, comm), end=\"\")\n\n");
fprintf(ofp, "def get_dict_as_string(a_dict, delimiter=' '):\n"
"\treturn delimiter.join"
"(['%%s=%%s'%%(k,str(v))for k,v in sorted(a_dict.items())])\n");
fclose(ofp);
fprintf(stderr, "generated Python script: %s\n", fname);
return 0;
}
#else
static int python_generate_script(struct tep_handle *pevent __maybe_unused,
const char *outfile __maybe_unused)
{
fprintf(stderr, "Generating Python perf-script is not supported."
" Install libtraceevent and rebuild perf to enable it.\n"
"For example:\n # apt install libtraceevent-dev (ubuntu)"
"\n # yum install libtraceevent-devel (Fedora)"
"\n etc.\n");
return -1;
}
#endif
struct scripting_ops python_scripting_ops = {
.name = "Python",
.dirname = "python",
.start_script = python_start_script,
.flush_script = python_flush_script,
.stop_script = python_stop_script,
.process_event = python_process_event,
.process_switch = python_process_switch,
.process_auxtrace_error = python_process_auxtrace_error,
.process_stat = python_process_stat,
.process_stat_interval = python_process_stat_interval,
.process_throttle = python_process_throttle,
.generate_script = python_generate_script,
};
| linux-master | tools/perf/util/scripting-engines/trace-event-python.c |
/*
* trace-event-perl. Feed perf script events to an embedded Perl interpreter.
*
* Copyright (C) 2009 Tom Zanussi <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/time64.h>
#include <traceevent/event-parse.h>
#include <stdbool.h>
/* perl needs the following define, right after including stdbool.h */
#define HAS_BOOL
#include <EXTERN.h>
#include <perl.h>
#include "../callchain.h"
#include "../dso.h"
#include "../machine.h"
#include "../map.h"
#include "../symbol.h"
#include "../thread.h"
#include "../event.h"
#include "../trace-event.h"
#include "../evsel.h"
#include "../debug.h"
void boot_Perf__Trace__Context(pTHX_ CV *cv);
void boot_DynaLoader(pTHX_ CV *cv);
typedef PerlInterpreter * INTERP;
void xs_init(pTHX);
void xs_init(pTHX)
{
const char *file = __FILE__;
dXSUB_SYS;
newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context,
file);
newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file);
}
INTERP my_perl;
#define TRACE_EVENT_TYPE_MAX \
((1 << (sizeof(unsigned short) * 8)) - 1)
extern struct scripting_context *scripting_context;
static char *cur_field_name;
static int zero_flag_atom;
static void define_symbolic_value(const char *ev_name,
const char *field_name,
const char *field_value,
const char *field_str)
{
unsigned long long value;
dSP;
value = eval_flag(field_value);
ENTER;
SAVETMPS;
PUSHMARK(SP);
XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
XPUSHs(sv_2mortal(newSVuv(value)));
XPUSHs(sv_2mortal(newSVpv(field_str, 0)));
PUTBACK;
if (get_cv("main::define_symbolic_value", 0))
call_pv("main::define_symbolic_value", G_SCALAR);
SPAGAIN;
PUTBACK;
FREETMPS;
LEAVE;
}
static void define_symbolic_values(struct tep_print_flag_sym *field,
const char *ev_name,
const char *field_name)
{
define_symbolic_value(ev_name, field_name, field->value, field->str);
if (field->next)
define_symbolic_values(field->next, ev_name, field_name);
}
static void define_symbolic_field(const char *ev_name,
const char *field_name)
{
dSP;
ENTER;
SAVETMPS;
PUSHMARK(SP);
XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
PUTBACK;
if (get_cv("main::define_symbolic_field", 0))
call_pv("main::define_symbolic_field", G_SCALAR);
SPAGAIN;
PUTBACK;
FREETMPS;
LEAVE;
}
static void define_flag_value(const char *ev_name,
const char *field_name,
const char *field_value,
const char *field_str)
{
unsigned long long value;
dSP;
value = eval_flag(field_value);
ENTER;
SAVETMPS;
PUSHMARK(SP);
XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
XPUSHs(sv_2mortal(newSVuv(value)));
XPUSHs(sv_2mortal(newSVpv(field_str, 0)));
PUTBACK;
if (get_cv("main::define_flag_value", 0))
call_pv("main::define_flag_value", G_SCALAR);
SPAGAIN;
PUTBACK;
FREETMPS;
LEAVE;
}
static void define_flag_values(struct tep_print_flag_sym *field,
const char *ev_name,
const char *field_name)
{
define_flag_value(ev_name, field_name, field->value, field->str);
if (field->next)
define_flag_values(field->next, ev_name, field_name);
}
static void define_flag_field(const char *ev_name,
const char *field_name,
const char *delim)
{
dSP;
ENTER;
SAVETMPS;
PUSHMARK(SP);
XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
XPUSHs(sv_2mortal(newSVpv(delim, 0)));
PUTBACK;
if (get_cv("main::define_flag_field", 0))
call_pv("main::define_flag_field", G_SCALAR);
SPAGAIN;
PUTBACK;
FREETMPS;
LEAVE;
}
static void define_event_symbols(struct tep_event *event,
const char *ev_name,
struct tep_print_arg *args)
{
if (args == NULL)
return;
switch (args->type) {
case TEP_PRINT_NULL:
break;
case TEP_PRINT_ATOM:
define_flag_value(ev_name, cur_field_name, "0",
args->atom.atom);
zero_flag_atom = 0;
break;
case TEP_PRINT_FIELD:
free(cur_field_name);
cur_field_name = strdup(args->field.name);
break;
case TEP_PRINT_FLAGS:
define_event_symbols(event, ev_name, args->flags.field);
define_flag_field(ev_name, cur_field_name, args->flags.delim);
define_flag_values(args->flags.flags, ev_name, cur_field_name);
break;
case TEP_PRINT_SYMBOL:
define_event_symbols(event, ev_name, args->symbol.field);
define_symbolic_field(ev_name, cur_field_name);
define_symbolic_values(args->symbol.symbols, ev_name,
cur_field_name);
break;
case TEP_PRINT_HEX:
case TEP_PRINT_HEX_STR:
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
case TEP_PRINT_INT_ARRAY:
define_event_symbols(event, ev_name, args->int_array.field);
define_event_symbols(event, ev_name, args->int_array.count);
define_event_symbols(event, ev_name, args->int_array.el_size);
break;
case TEP_PRINT_BSTRING:
case TEP_PRINT_DYNAMIC_ARRAY:
case TEP_PRINT_DYNAMIC_ARRAY_LEN:
case TEP_PRINT_STRING:
case TEP_PRINT_BITMASK:
break;
case TEP_PRINT_TYPE:
define_event_symbols(event, ev_name, args->typecast.item);
break;
case TEP_PRINT_OP:
if (strcmp(args->op.op, ":") == 0)
zero_flag_atom = 1;
define_event_symbols(event, ev_name, args->op.left);
define_event_symbols(event, ev_name, args->op.right);
break;
case TEP_PRINT_FUNC:
default:
pr_err("Unsupported print arg type\n");
/* we should warn... */
return;
}
if (args->next)
define_event_symbols(event, ev_name, args->next);
}
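/*
 * Resolve the sample's callchain and return it as a reference to a Perl array
 * of hashes, each carrying an "ip" key plus "sym" and "dso" keys when the
 * symbol and map could be resolved.
 */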
static SV *perl_process_callchain(struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al)
{
struct callchain_cursor *cursor;
AV *list;
list = newAV();
if (!list)
goto exit;
if (!symbol_conf.use_callchain || !sample->callchain)
goto exit;
cursor = get_tls_callchain_cursor();
if (thread__resolve_callchain(al->thread, cursor, evsel,
sample, NULL, NULL, scripting_max_stack) != 0) {
pr_err("Failed to resolve callchain. Skipping\n");
goto exit;
}
callchain_cursor_commit(cursor);
while (1) {
HV *elem;
struct callchain_cursor_node *node;
node = callchain_cursor_current(cursor);
if (!node)
break;
elem = newHV();
if (!elem)
goto exit;
if (!hv_stores(elem, "ip", newSVuv(node->ip))) {
hv_undef(elem);
goto exit;
}
if (node->ms.sym) {
HV *sym = newHV();
if (!sym) {
hv_undef(elem);
goto exit;
}
if (!hv_stores(sym, "start", newSVuv(node->ms.sym->start)) ||
!hv_stores(sym, "end", newSVuv(node->ms.sym->end)) ||
!hv_stores(sym, "binding", newSVuv(node->ms.sym->binding)) ||
!hv_stores(sym, "name", newSVpvn(node->ms.sym->name,
node->ms.sym->namelen)) ||
!hv_stores(elem, "sym", newRV_noinc((SV*)sym))) {
hv_undef(sym);
hv_undef(elem);
goto exit;
}
}
if (node->ms.map) {
struct map *map = node->ms.map;
struct dso *dso = map ? map__dso(map) : NULL;
const char *dsoname = "[unknown]";
if (dso) {
if (symbol_conf.show_kernel_path && dso->long_name)
dsoname = dso->long_name;
else
dsoname = dso->name;
}
if (!hv_stores(elem, "dso", newSVpv(dsoname,0))) {
hv_undef(elem);
goto exit;
}
}
callchain_cursor_advance(cursor);
av_push(list, newRV_noinc((SV*)elem));
}
exit:
return newRV_noinc((SV*)list);
}
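/*
 * Convert a tracepoint sample into arguments for the <system>::<name> Perl
 * handler, falling back to main::trace_unhandled if the script does not
 * define one.
 */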
static void perl_process_tracepoint(struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al)
{
struct thread *thread = al->thread;
struct tep_event *event = evsel->tp_format;
struct tep_format_field *field;
static char handler[256];
unsigned long long val;
unsigned long s, ns;
int pid;
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
const char *comm = thread__comm_str(thread);
DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX);
bitmap_zero(events_defined, TRACE_EVENT_TYPE_MAX);
dSP;
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
return;
if (!event) {
pr_debug("ug! no event found for type %" PRIu64, (u64)evsel->core.attr.config);
return;
}
pid = raw_field_value(event, "common_pid", data);
sprintf(handler, "%s::%s", event->system, event->name);
if (!__test_and_set_bit(event->id, events_defined))
define_event_symbols(event, handler, event->print_fmt.args);
s = nsecs / NSEC_PER_SEC;
ns = nsecs - s * NSEC_PER_SEC;
ENTER;
SAVETMPS;
PUSHMARK(SP);
XPUSHs(sv_2mortal(newSVpv(handler, 0)));
XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context))));
XPUSHs(sv_2mortal(newSVuv(cpu)));
XPUSHs(sv_2mortal(newSVuv(s)));
XPUSHs(sv_2mortal(newSVuv(ns)));
XPUSHs(sv_2mortal(newSViv(pid)));
XPUSHs(sv_2mortal(newSVpv(comm, 0)));
XPUSHs(sv_2mortal(perl_process_callchain(sample, evsel, al)));
/* common fields other than pid can be accessed via xsub fns */
for (field = event->format.fields; field; field = field->next) {
if (field->flags & TEP_FIELD_IS_STRING) {
int offset;
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
offset = *(int *)(data + field->offset);
offset &= 0xffff;
if (tep_field_is_relative(field->flags))
offset += field->offset + field->size;
} else
offset = field->offset;
XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0)));
} else { /* FIELD_IS_NUMERIC */
val = read_size(event, data + field->offset,
field->size);
if (field->flags & TEP_FIELD_IS_SIGNED) {
XPUSHs(sv_2mortal(newSViv(val)));
} else {
XPUSHs(sv_2mortal(newSVuv(val)));
}
}
}
PUTBACK;
if (get_cv(handler, 0))
call_pv(handler, G_SCALAR);
else if (get_cv("main::trace_unhandled", 0)) {
XPUSHs(sv_2mortal(newSVpv(handler, 0)));
XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context))));
XPUSHs(sv_2mortal(newSVuv(cpu)));
XPUSHs(sv_2mortal(newSVuv(nsecs)));
XPUSHs(sv_2mortal(newSViv(pid)));
XPUSHs(sv_2mortal(newSVpv(comm, 0)));
XPUSHs(sv_2mortal(perl_process_callchain(sample, evsel, al)));
call_pv("main::trace_unhandled", G_SCALAR);
}
SPAGAIN;
PUTBACK;
FREETMPS;
LEAVE;
}
static void perl_process_event_generic(union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel)
{
dSP;
if (!get_cv("process_event", 0))
return;
ENTER;
SAVETMPS;
PUSHMARK(SP);
XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size)));
XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->core.attr, sizeof(evsel->core.attr))));
XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample))));
XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size)));
PUTBACK;
call_pv("process_event", G_SCALAR);
SPAGAIN;
PUTBACK;
FREETMPS;
LEAVE;
}
static void perl_process_event(union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al,
struct addr_location *addr_al)
{
scripting_context__update(scripting_context, event, sample, evsel, al, addr_al);
perl_process_tracepoint(sample, evsel, al);
perl_process_event_generic(event, sample, evsel);
}
static void run_start_sub(void)
{
dSP; /* access to Perl stack */
PUSHMARK(SP);
if (get_cv("main::trace_begin", 0))
call_pv("main::trace_begin", G_DISCARD | G_NOARGS);
}
/*
* Start trace script
*/
static int perl_start_script(const char *script, int argc, const char **argv,
struct perf_session *session)
{
const char **command_line;
int i, err = 0;
scripting_context->session = session;
command_line = malloc((argc + 2) * sizeof(const char *));
command_line[0] = "";
command_line[1] = script;
for (i = 2; i < argc + 2; i++)
command_line[i] = argv[i - 2];
my_perl = perl_alloc();
perl_construct(my_perl);
if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line,
(char **)NULL)) {
err = -1;
goto error;
}
if (perl_run(my_perl)) {
err = -1;
goto error;
}
if (SvTRUE(ERRSV)) {
err = -1;
goto error;
}
run_start_sub();
free(command_line);
return 0;
error:
perl_free(my_perl);
free(command_line);
return err;
}
static int perl_flush_script(void)
{
return 0;
}
/*
* Stop trace script
*/
static int perl_stop_script(void)
{
dSP; /* access to Perl stack */
PUSHMARK(SP);
if (get_cv("main::trace_end", 0))
call_pv("main::trace_end", G_DISCARD | G_NOARGS);
perl_destruct(my_perl);
perl_free(my_perl);
return 0;
}
static int perl_generate_script(struct tep_handle *pevent, const char *outfile)
{
int i, not_first, count, nr_events;
struct tep_event **all_events;
struct tep_event *event = NULL;
struct tep_format_field *f;
char fname[PATH_MAX];
FILE *ofp;
sprintf(fname, "%s.pl", outfile);
ofp = fopen(fname, "w");
if (ofp == NULL) {
fprintf(stderr, "couldn't open %s\n", fname);
return -1;
}
fprintf(ofp, "# perf script event handlers, "
"generated by perf script -g perl\n");
fprintf(ofp, "# Licensed under the terms of the GNU GPL"
" License version 2\n\n");
fprintf(ofp, "# The common_* event handler fields are the most useful "
"fields common to\n");
fprintf(ofp, "# all events. They don't necessarily correspond to "
"the 'common_*' fields\n");
fprintf(ofp, "# in the format files. Those fields not available as "
"handler params can\n");
fprintf(ofp, "# be retrieved using Perl functions of the form "
"common_*($context).\n");
fprintf(ofp, "# See Context.pm for the list of available "
"functions.\n\n");
fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/"
"Perf-Trace-Util/lib\";\n");
fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n");
fprintf(ofp, "use Perf::Trace::Core;\n");
fprintf(ofp, "use Perf::Trace::Context;\n");
fprintf(ofp, "use Perf::Trace::Util;\n\n");
fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n");
fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n");
fprintf(ofp, "\n\
sub print_backtrace\n\
{\n\
my $callchain = shift;\n\
for my $node (@$callchain)\n\
{\n\
if(exists $node->{sym})\n\
{\n\
printf( \"\\t[\\%%x] \\%%s\\n\", $node->{ip}, $node->{sym}{name});\n\
}\n\
else\n\
{\n\
printf( \"\\t[\\%%x]\\n\", $node{ip});\n\
}\n\
}\n\
}\n\n\
");
nr_events = tep_get_events_count(pevent);
all_events = tep_list_events(pevent, TEP_EVENT_SORT_ID);
for (i = 0; all_events && i < nr_events; i++) {
event = all_events[i];
fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
fprintf(ofp, "\tmy (");
fprintf(ofp, "$event_name, ");
fprintf(ofp, "$context, ");
fprintf(ofp, "$common_cpu, ");
fprintf(ofp, "$common_secs, ");
fprintf(ofp, "$common_nsecs,\n");
fprintf(ofp, "\t $common_pid, ");
fprintf(ofp, "$common_comm, ");
fprintf(ofp, "$common_callchain,\n\t ");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (++count % 5 == 0)
fprintf(ofp, "\n\t ");
fprintf(ofp, "$%s", f->name);
}
fprintf(ofp, ") = @_;\n\n");
fprintf(ofp, "\tprint_header($event_name, $common_cpu, "
"$common_secs, $common_nsecs,\n\t "
"$common_pid, $common_comm, $common_callchain);\n\n");
fprintf(ofp, "\tprintf(\"");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (count && count % 4 == 0) {
fprintf(ofp, "\".\n\t \"");
}
count++;
fprintf(ofp, "%s=", f->name);
if (f->flags & TEP_FIELD_IS_STRING ||
f->flags & TEP_FIELD_IS_FLAG ||
f->flags & TEP_FIELD_IS_SYMBOLIC)
fprintf(ofp, "%%s");
else if (f->flags & TEP_FIELD_IS_SIGNED)
fprintf(ofp, "%%d");
else
fprintf(ofp, "%%u");
}
fprintf(ofp, "\\n\",\n\t ");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (++count % 5 == 0)
fprintf(ofp, "\n\t ");
if (f->flags & TEP_FIELD_IS_FLAG) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t ");
count = 4;
}
fprintf(ofp, "flag_str(\"");
fprintf(ofp, "%s::%s\", ", event->system,
event->name);
fprintf(ofp, "\"%s\", $%s)", f->name,
f->name);
} else if (f->flags & TEP_FIELD_IS_SYMBOLIC) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t ");
count = 4;
}
fprintf(ofp, "symbol_str(\"");
fprintf(ofp, "%s::%s\", ", event->system,
event->name);
fprintf(ofp, "\"%s\", $%s)", f->name,
f->name);
} else
fprintf(ofp, "$%s", f->name);
}
fprintf(ofp, ");\n\n");
fprintf(ofp, "\tprint_backtrace($common_callchain);\n");
fprintf(ofp, "}\n\n");
}
fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, "
"$common_cpu, $common_secs, $common_nsecs,\n\t "
"$common_pid, $common_comm, $common_callchain) = @_;\n\n");
fprintf(ofp, "\tprint_header($event_name, $common_cpu, "
"$common_secs, $common_nsecs,\n\t $common_pid, "
"$common_comm, $common_callchain);\n");
fprintf(ofp, "\tprint_backtrace($common_callchain);\n");
fprintf(ofp, "}\n\n");
fprintf(ofp, "sub print_header\n{\n"
"\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n"
"\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t "
"$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n");
fprintf(ofp,
"\n# Packed byte string args of process_event():\n"
"#\n"
"# $event:\tunion perf_event\tutil/event.h\n"
"# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n"
"# $sample:\tstruct perf_sample\tutil/event.h\n"
"# $raw_data:\tperf_sample->raw_data\tutil/event.h\n"
"\n"
"sub process_event\n"
"{\n"
"\tmy ($event, $attr, $sample, $raw_data) = @_;\n"
"\n"
"\tmy @event\t= unpack(\"LSS\", $event);\n"
"\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n"
"\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n"
"\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n"
"\n"
"\tuse Data::Dumper;\n"
"\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n"
"}\n");
fclose(ofp);
fprintf(stderr, "generated Perl script: %s\n", fname);
return 0;
}
struct scripting_ops perl_scripting_ops = {
.name = "Perl",
.dirname = "perl",
.start_script = perl_start_script,
.flush_script = perl_flush_script,
.stop_script = perl_stop_script,
.process_event = perl_process_event,
.generate_script = perl_generate_script,
};
| linux-master | tools/perf/util/scripting-engines/trace-event-perl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* HiSilicon PCIe Trace and Tuning (PTT) support
* Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>
#include <byteswap.h>
#include <linux/bitops.h>
#include <stdarg.h>
#include "../color.h"
#include "hisi-ptt-pkt-decoder.h"
/*
* For 8DW format, the bit[31:11] of DW0 is always 0x1fffff, which can be
* used to distinguish the data format.
* 8DW format is like:
* bits [ 31:11 ][ 10:0 ]
* |---------------------------------------|-------------------|
* DW0 [ 0x1fffff ][ Reserved (0x7ff) ]
* DW1 [ Prefix ]
* DW2 [ Header DW0 ]
* DW3 [ Header DW1 ]
* DW4 [ Header DW2 ]
* DW5 [ Header DW3 ]
* DW6 [ Reserved (0x0) ]
* DW7 [ Time ]
*
* 4DW format is like:
* bits [31:30] [ 29:25 ][24][23][22][21][ 20:11 ][ 10:0 ]
* |-----|---------|---|---|---|---|-------------|-------------|
* DW0 [ Fmt ][ Type ][T9][T8][TH][SO][ Length ][ Time ]
* DW1 [ Header DW1 ]
* DW2 [ Header DW2 ]
* DW3 [ Header DW3 ]
*/
enum hisi_ptt_8dw_pkt_field_type {
HISI_PTT_8DW_CHK_AND_RSV0,
HISI_PTT_8DW_PREFIX,
HISI_PTT_8DW_HEAD0,
HISI_PTT_8DW_HEAD1,
HISI_PTT_8DW_HEAD2,
HISI_PTT_8DW_HEAD3,
HISI_PTT_8DW_RSV1,
HISI_PTT_8DW_TIME,
HISI_PTT_8DW_TYPE_MAX
};
enum hisi_ptt_4dw_pkt_field_type {
HISI_PTT_4DW_HEAD1,
HISI_PTT_4DW_HEAD2,
HISI_PTT_4DW_HEAD3,
HISI_PTT_4DW_TYPE_MAX
};
static const char * const hisi_ptt_8dw_pkt_field_name[] = {
[HISI_PTT_8DW_PREFIX] = "Prefix",
[HISI_PTT_8DW_HEAD0] = "Header DW0",
[HISI_PTT_8DW_HEAD1] = "Header DW1",
[HISI_PTT_8DW_HEAD2] = "Header DW2",
[HISI_PTT_8DW_HEAD3] = "Header DW3",
[HISI_PTT_8DW_TIME] = "Time"
};
static const char * const hisi_ptt_4dw_pkt_field_name[] = {
[HISI_PTT_4DW_HEAD1] = "Header DW1",
[HISI_PTT_4DW_HEAD2] = "Header DW2",
[HISI_PTT_4DW_HEAD3] = "Header DW3",
};
union hisi_ptt_4dw {
struct {
uint32_t format : 2;
uint32_t type : 5;
uint32_t t9 : 1;
uint32_t t8 : 1;
uint32_t th : 1;
uint32_t so : 1;
uint32_t len : 10;
uint32_t time : 11;
};
uint32_t value;
};
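/* Dump one packet field (HISI_PTT_FIELD_LENTH bytes) as hex, then its description. */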
static void hisi_ptt_print_pkt(const unsigned char *buf, int pos, const char *desc)
{
const char *color = PERF_COLOR_BLUE;
int i;
printf(".");
color_fprintf(stdout, color, " %08x: ", pos);
for (i = 0; i < HISI_PTT_FIELD_LENTH; i++)
color_fprintf(stdout, color, "%02x ", buf[pos + i]);
for (i = 0; i < HISI_PTT_MAX_SPACE_LEN; i++)
color_fprintf(stdout, color, " ");
color_fprintf(stdout, color, " %s\n", desc);
}
static int hisi_ptt_8dw_kpt_desc(const unsigned char *buf, int pos)
{
int i;
for (i = 0; i < HISI_PTT_8DW_TYPE_MAX; i++) {
/* Do not show 8DW check field and reserved fields */
if (i == HISI_PTT_8DW_CHK_AND_RSV0 || i == HISI_PTT_8DW_RSV1) {
pos += HISI_PTT_FIELD_LENTH;
continue;
}
hisi_ptt_print_pkt(buf, pos, hisi_ptt_8dw_pkt_field_name[i]);
pos += HISI_PTT_FIELD_LENTH;
}
return hisi_ptt_pkt_size[HISI_PTT_8DW_PKT];
}
static void hisi_ptt_4dw_print_dw0(const unsigned char *buf, int pos)
{
const char *color = PERF_COLOR_BLUE;
union hisi_ptt_4dw dw0;
int i;
dw0.value = *(uint32_t *)(buf + pos);
printf(".");
color_fprintf(stdout, color, " %08x: ", pos);
for (i = 0; i < HISI_PTT_FIELD_LENTH; i++)
color_fprintf(stdout, color, "%02x ", buf[pos + i]);
for (i = 0; i < HISI_PTT_MAX_SPACE_LEN; i++)
color_fprintf(stdout, color, " ");
color_fprintf(stdout, color,
" %s %x %s %x %s %x %s %x %s %x %s %x %s %x %s %x\n",
"Format", dw0.format, "Type", dw0.type, "T9", dw0.t9,
"T8", dw0.t8, "TH", dw0.th, "SO", dw0.so, "Length",
dw0.len, "Time", dw0.time);
}
static int hisi_ptt_4dw_kpt_desc(const unsigned char *buf, int pos)
{
int i;
hisi_ptt_4dw_print_dw0(buf, pos);
pos += HISI_PTT_FIELD_LENTH;
for (i = 0; i < HISI_PTT_4DW_TYPE_MAX; i++) {
hisi_ptt_print_pkt(buf, pos, hisi_ptt_4dw_pkt_field_name[i]);
pos += HISI_PTT_FIELD_LENTH;
}
return hisi_ptt_pkt_size[HISI_PTT_4DW_PKT];
}
int hisi_ptt_pkt_desc(const unsigned char *buf, int pos, enum hisi_ptt_pkt_type type)
{
if (type == HISI_PTT_8DW_PKT)
return hisi_ptt_8dw_kpt_desc(buf, pos);
return hisi_ptt_4dw_kpt_desc(buf, pos);
}
| linux-master | tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt_insn_decoder.c: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
*/
#include <linux/kernel.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>
#include <byteswap.h>
#include "../../../arch/x86/include/asm/insn.h"
#include "../../../arch/x86/lib/inat.c"
#include "../../../arch/x86/lib/insn.c"
#include "event.h"
#include "intel-pt-insn-decoder.h"
#include "dump-insn.h"
#include "util/sample.h"
#if INTEL_PT_INSN_BUF_SZ < MAX_INSN_SIZE || INTEL_PT_INSN_BUF_SZ > MAX_INSN
#error Instruction buffer size too small
#endif
/* Based on branch_type() from arch/x86/events/intel/lbr.c */
static void intel_pt_insn_decoder(struct insn *insn,
struct intel_pt_insn *intel_pt_insn)
{
enum intel_pt_insn_op op = INTEL_PT_OP_OTHER;
enum intel_pt_insn_branch branch = INTEL_PT_BR_NO_BRANCH;
int ext;
intel_pt_insn->rel = 0;
intel_pt_insn->emulated_ptwrite = false;
if (insn_is_avx(insn)) {
intel_pt_insn->op = INTEL_PT_OP_OTHER;
intel_pt_insn->branch = INTEL_PT_BR_NO_BRANCH;
intel_pt_insn->length = insn->length;
return;
}
switch (insn->opcode.bytes[0]) {
case 0xf:
switch (insn->opcode.bytes[1]) {
case 0x01:
switch (insn->modrm.bytes[0]) {
case 0xc2: /* vmlaunch */
case 0xc3: /* vmresume */
op = INTEL_PT_OP_VMENTRY;
branch = INTEL_PT_BR_INDIRECT;
break;
case 0xca:
switch (insn->prefixes.bytes[3]) {
case 0xf2: /* erets */
op = INTEL_PT_OP_ERETS;
branch = INTEL_PT_BR_INDIRECT;
break;
case 0xf3: /* eretu */
op = INTEL_PT_OP_ERETU;
branch = INTEL_PT_BR_INDIRECT;
break;
default:
break;
}
break;
default:
break;
}
break;
case 0x05: /* syscall */
case 0x34: /* sysenter */
op = INTEL_PT_OP_SYSCALL;
branch = INTEL_PT_BR_INDIRECT;
break;
case 0x07: /* sysret */
case 0x35: /* sysexit */
op = INTEL_PT_OP_SYSRET;
branch = INTEL_PT_BR_INDIRECT;
break;
case 0x80 ... 0x8f: /* jcc */
op = INTEL_PT_OP_JCC;
branch = INTEL_PT_BR_CONDITIONAL;
break;
default:
break;
}
break;
case 0x70 ... 0x7f: /* jcc */
op = INTEL_PT_OP_JCC;
branch = INTEL_PT_BR_CONDITIONAL;
break;
case 0xc2: /* near ret */
case 0xc3: /* near ret */
case 0xca: /* far ret */
case 0xcb: /* far ret */
op = INTEL_PT_OP_RET;
branch = INTEL_PT_BR_INDIRECT;
break;
case 0xcf: /* iret */
op = INTEL_PT_OP_IRET;
branch = INTEL_PT_BR_INDIRECT;
break;
case 0xcc ... 0xce: /* int */
op = INTEL_PT_OP_INT;
branch = INTEL_PT_BR_INDIRECT;
break;
case 0xe8: /* call near rel */
op = INTEL_PT_OP_CALL;
branch = INTEL_PT_BR_UNCONDITIONAL;
break;
case 0x9a: /* call far absolute */
op = INTEL_PT_OP_CALL;
branch = INTEL_PT_BR_INDIRECT;
break;
case 0xe0 ... 0xe2: /* loop */
op = INTEL_PT_OP_LOOP;
branch = INTEL_PT_BR_CONDITIONAL;
break;
case 0xe3: /* jcc */
op = INTEL_PT_OP_JCC;
branch = INTEL_PT_BR_CONDITIONAL;
break;
case 0xe9: /* jmp */
case 0xeb: /* jmp */
op = INTEL_PT_OP_JMP;
branch = INTEL_PT_BR_UNCONDITIONAL;
break;
case 0xea: /* far jmp */
op = INTEL_PT_OP_JMP;
branch = INTEL_PT_BR_INDIRECT;
break;
case 0xff: /* call near absolute, call far absolute ind */
ext = (insn->modrm.bytes[0] >> 3) & 0x7;
switch (ext) {
case 2: /* near ind call */
case 3: /* far ind call */
op = INTEL_PT_OP_CALL;
branch = INTEL_PT_BR_INDIRECT;
break;
case 4:
case 5:
op = INTEL_PT_OP_JMP;
branch = INTEL_PT_BR_INDIRECT;
break;
default:
break;
}
break;
default:
break;
}
intel_pt_insn->op = op;
intel_pt_insn->branch = branch;
intel_pt_insn->length = insn->length;
if (branch == INTEL_PT_BR_CONDITIONAL ||
branch == INTEL_PT_BR_UNCONDITIONAL) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
switch (insn->immediate.nbytes) {
case 1:
intel_pt_insn->rel = insn->immediate.value;
break;
case 2:
intel_pt_insn->rel =
bswap_16((short)insn->immediate.value);
break;
case 4:
intel_pt_insn->rel = bswap_32(insn->immediate.value);
break;
default:
intel_pt_insn->rel = 0;
break;
}
#else
intel_pt_insn->rel = insn->immediate.value;
#endif
}
}
int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64,
struct intel_pt_insn *intel_pt_insn)
{
struct insn insn;
int ret;
ret = insn_decode(&insn, buf, len,
x86_64 ? INSN_MODE_64 : INSN_MODE_32);
if (ret < 0 || insn.length > len)
return -1;
intel_pt_insn_decoder(&insn, intel_pt_insn);
if (insn.length < INTEL_PT_INSN_BUF_SZ)
memcpy(intel_pt_insn->buf, buf, insn.length);
else
memcpy(intel_pt_insn->buf, buf, INTEL_PT_INSN_BUF_SZ);
return 0;
}
int arch_is_branch(const unsigned char *buf, size_t len, int x86_64)
{
struct intel_pt_insn in;
if (intel_pt_get_insn(buf, len, x86_64, &in) < 0)
return -1;
return in.branch != INTEL_PT_BR_NO_BRANCH;
}
const char *dump_insn(struct perf_insn *x, uint64_t ip __maybe_unused,
u8 *inbuf, int inlen, int *lenp)
{
struct insn insn;
int n, i, ret;
int left;
ret = insn_decode(&insn, inbuf, inlen,
x->is64bit ? INSN_MODE_64 : INSN_MODE_32);
if (ret < 0 || insn.length > inlen)
return "<bad>";
if (lenp)
*lenp = insn.length;
left = sizeof(x->out);
n = snprintf(x->out, left, "insn: ");
left -= n;
for (i = 0; i < insn.length; i++) {
int printed = snprintf(x->out + n, left, "%02x ", inbuf[i]);
/* Account only for the bytes just printed, not the cumulative count */
n += printed;
left -= printed;
}
return x->out;
}
const char *branch_name[] = {
[INTEL_PT_OP_OTHER] = "Other",
[INTEL_PT_OP_CALL] = "Call",
[INTEL_PT_OP_RET] = "Ret",
[INTEL_PT_OP_JCC] = "Jcc",
[INTEL_PT_OP_JMP] = "Jmp",
[INTEL_PT_OP_LOOP] = "Loop",
[INTEL_PT_OP_IRET] = "IRet",
[INTEL_PT_OP_INT] = "Int",
[INTEL_PT_OP_SYSCALL] = "Syscall",
[INTEL_PT_OP_SYSRET] = "Sysret",
[INTEL_PT_OP_VMENTRY] = "VMentry",
[INTEL_PT_OP_ERETS] = "Erets",
[INTEL_PT_OP_ERETU] = "Eretu",
};
const char *intel_pt_insn_name(enum intel_pt_insn_op op)
{
return branch_name[op];
}
int intel_pt_insn_desc(const struct intel_pt_insn *intel_pt_insn, char *buf,
size_t buf_len)
{
switch (intel_pt_insn->branch) {
case INTEL_PT_BR_CONDITIONAL:
case INTEL_PT_BR_UNCONDITIONAL:
return snprintf(buf, buf_len, "%s %s%d",
intel_pt_insn_name(intel_pt_insn->op),
intel_pt_insn->rel > 0 ? "+" : "",
intel_pt_insn->rel);
case INTEL_PT_BR_NO_BRANCH:
case INTEL_PT_BR_INDIRECT:
return snprintf(buf, buf_len, "%s",
intel_pt_insn_name(intel_pt_insn->op));
default:
break;
}
return 0;
}
int intel_pt_insn_type(enum intel_pt_insn_op op)
{
switch (op) {
case INTEL_PT_OP_OTHER:
return 0;
case INTEL_PT_OP_CALL:
return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL;
case INTEL_PT_OP_RET:
return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN;
case INTEL_PT_OP_JCC:
return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL;
case INTEL_PT_OP_JMP:
return PERF_IP_FLAG_BRANCH;
case INTEL_PT_OP_LOOP:
return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL;
case INTEL_PT_OP_IRET:
case INTEL_PT_OP_ERETS:
case INTEL_PT_OP_ERETU:
return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN |
PERF_IP_FLAG_INTERRUPT;
case INTEL_PT_OP_INT:
return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
PERF_IP_FLAG_INTERRUPT;
case INTEL_PT_OP_SYSCALL:
return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
PERF_IP_FLAG_SYSCALLRET;
case INTEL_PT_OP_SYSRET:
return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN |
PERF_IP_FLAG_SYSCALLRET;
case INTEL_PT_OP_VMENTRY:
return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
PERF_IP_FLAG_VMENTRY;
default:
return 0;
}
}
| linux-master | tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt_decoder.c: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include "../auxtrace.h"
#include "intel-pt-insn-decoder.h"
#include "intel-pt-pkt-decoder.h"
#include "intel-pt-decoder.h"
#include "intel-pt-log.h"
#define BITULL(x) (1ULL << (x))
/* IA32_RTIT_CTL MSR bits */
#define INTEL_PT_CYC_ENABLE BITULL(1)
#define INTEL_PT_CYC_THRESHOLD (BITULL(22) | BITULL(21) | BITULL(20) | BITULL(19))
#define INTEL_PT_CYC_THRESHOLD_SHIFT 19
#define INTEL_PT_BLK_SIZE 1024
#define BIT63 (((uint64_t)1 << 63))
#define SEVEN_BYTES 0xffffffffffffffULL
#define NO_VMCS 0xffffffffffULL
#define INTEL_PT_RETURN 1
/*
* Default maximum number of loops with no packets consumed, i.e. stuck in a
* loop.
*/
#define INTEL_PT_MAX_LOOPS 100000
struct intel_pt_blk {
struct intel_pt_blk *prev;
uint64_t ip[INTEL_PT_BLK_SIZE];
};
struct intel_pt_stack {
struct intel_pt_blk *blk;
struct intel_pt_blk *spare;
int pos;
};
enum intel_pt_p_once {
INTEL_PT_PRT_ONCE_UNK_VMCS,
INTEL_PT_PRT_ONCE_ERANGE,
};
enum intel_pt_pkt_state {
INTEL_PT_STATE_NO_PSB,
INTEL_PT_STATE_NO_IP,
INTEL_PT_STATE_ERR_RESYNC,
INTEL_PT_STATE_IN_SYNC,
INTEL_PT_STATE_TNT_CONT,
INTEL_PT_STATE_TNT,
INTEL_PT_STATE_TIP,
INTEL_PT_STATE_TIP_PGD,
INTEL_PT_STATE_FUP,
INTEL_PT_STATE_FUP_NO_TIP,
INTEL_PT_STATE_FUP_IN_PSB,
INTEL_PT_STATE_RESAMPLE,
INTEL_PT_STATE_VM_TIME_CORRELATION,
};
static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
{
switch (pkt_state) {
case INTEL_PT_STATE_NO_PSB:
case INTEL_PT_STATE_NO_IP:
case INTEL_PT_STATE_ERR_RESYNC:
case INTEL_PT_STATE_IN_SYNC:
case INTEL_PT_STATE_TNT_CONT:
case INTEL_PT_STATE_RESAMPLE:
case INTEL_PT_STATE_VM_TIME_CORRELATION:
return true;
case INTEL_PT_STATE_TNT:
case INTEL_PT_STATE_TIP:
case INTEL_PT_STATE_TIP_PGD:
case INTEL_PT_STATE_FUP:
case INTEL_PT_STATE_FUP_NO_TIP:
case INTEL_PT_STATE_FUP_IN_PSB:
return false;
default:
return true;
};
}
#ifdef INTEL_PT_STRICT
#define INTEL_PT_STATE_ERR1 INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR3 INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR4 INTEL_PT_STATE_NO_PSB
#else
#define INTEL_PT_STATE_ERR1 (decoder->pkt_state)
#define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_IP
#define INTEL_PT_STATE_ERR3 INTEL_PT_STATE_ERR_RESYNC
#define INTEL_PT_STATE_ERR4 INTEL_PT_STATE_IN_SYNC
#endif
struct intel_pt_decoder {
int (*get_trace)(struct intel_pt_buffer *buffer, void *data);
int (*walk_insn)(struct intel_pt_insn *intel_pt_insn,
uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip,
uint64_t max_insn_cnt, void *data);
bool (*pgd_ip)(uint64_t ip, void *data);
int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
struct intel_pt_vmcs_info *(*findnew_vmcs_info)(void *data, uint64_t vmcs);
void *data;
struct intel_pt_state state;
const unsigned char *buf;
size_t len;
bool return_compression;
bool branch_enable;
bool mtc_insn;
bool pge;
bool have_tma;
bool have_cyc;
bool fixup_last_mtc;
bool have_last_ip;
bool in_psb;
bool hop;
bool leap;
bool emulated_ptwrite;
bool vm_time_correlation;
bool vm_tm_corr_dry_run;
bool vm_tm_corr_reliable;
bool vm_tm_corr_same_buf;
bool vm_tm_corr_continuous;
bool nr;
bool next_nr;
bool iflag;
bool next_iflag;
enum intel_pt_param_flags flags;
uint64_t pos;
uint64_t last_ip;
uint64_t ip;
uint64_t pip_payload;
uint64_t timestamp;
uint64_t tsc_timestamp;
uint64_t ref_timestamp;
uint64_t buf_timestamp;
uint64_t sample_timestamp;
uint64_t ret_addr;
uint64_t ctc_timestamp;
uint64_t ctc_delta;
uint64_t cycle_cnt;
uint64_t cyc_ref_timestamp;
uint64_t first_timestamp;
uint64_t last_reliable_timestamp;
uint64_t vmcs;
uint64_t print_once;
uint64_t last_ctc;
uint32_t last_mtc;
uint32_t tsc_ctc_ratio_n;
uint32_t tsc_ctc_ratio_d;
uint32_t tsc_ctc_mult;
uint32_t tsc_slip;
uint32_t ctc_rem_mask;
int mtc_shift;
struct intel_pt_stack stack;
enum intel_pt_pkt_state pkt_state;
enum intel_pt_pkt_ctx pkt_ctx;
enum intel_pt_pkt_ctx prev_pkt_ctx;
enum intel_pt_blk_type blk_type;
int blk_type_pos;
struct intel_pt_pkt packet;
struct intel_pt_pkt tnt;
int pkt_step;
int pkt_len;
int last_packet_type;
unsigned int cbr;
unsigned int cbr_seen;
unsigned int max_non_turbo_ratio;
double max_non_turbo_ratio_fp;
double cbr_cyc_to_tsc;
double calc_cyc_to_tsc;
bool have_calc_cyc_to_tsc;
int exec_mode;
unsigned int insn_bytes;
uint64_t period;
enum intel_pt_period_type period_type;
uint64_t tot_insn_cnt;
uint64_t period_insn_cnt;
uint64_t period_mask;
uint64_t period_ticks;
uint64_t last_masked_timestamp;
uint64_t tot_cyc_cnt;
uint64_t sample_tot_cyc_cnt;
uint64_t base_cyc_cnt;
uint64_t cyc_cnt_timestamp;
uint64_t ctl;
uint64_t cyc_threshold;
double tsc_to_cyc;
bool continuous_period;
bool overflow;
bool set_fup_tx_flags;
bool set_fup_ptw;
bool set_fup_mwait;
bool set_fup_pwre;
bool set_fup_exstop;
bool set_fup_bep;
bool set_fup_cfe_ip;
bool set_fup_cfe;
bool set_fup_mode_exec;
bool sample_cyc;
unsigned int fup_tx_flags;
unsigned int tx_flags;
uint64_t fup_ptw_payload;
uint64_t fup_mwait_payload;
uint64_t fup_pwre_payload;
uint64_t cbr_payload;
uint64_t timestamp_insn_cnt;
uint64_t sample_insn_cnt;
uint64_t stuck_ip;
struct intel_pt_pkt fup_cfe_pkt;
int max_loops;
int no_progress;
int stuck_ip_prd;
int stuck_ip_cnt;
uint64_t psb_ip;
const unsigned char *next_buf;
size_t next_len;
unsigned char temp_buf[INTEL_PT_PKT_MAX_SZ];
int evd_cnt;
struct intel_pt_evd evd[INTEL_PT_MAX_EVDS];
};
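/* Round x down to the nearest power of 2 (x must be non-zero). */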
static uint64_t intel_pt_lower_power_of_2(uint64_t x)
{
int i;
for (i = 0; x != 1; i++)
x >>= 1;
return x << i;
}
__printf(1, 2)
static void p_log(const char *fmt, ...)
{
char buf[512];
va_list args;
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
fprintf(stderr, "%s\n", buf);
intel_pt_log("%s\n", buf);
}
static bool intel_pt_print_once(struct intel_pt_decoder *decoder,
enum intel_pt_p_once id)
{
uint64_t bit = 1ULL << id;
if (decoder->print_once & bit)
return false;
decoder->print_once |= bit;
return true;
}
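/* Extract the CYC packet threshold field from the IA32_RTIT_CTL value. */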
static uint64_t intel_pt_cyc_threshold(uint64_t ctl)
{
if (!(ctl & INTEL_PT_CYC_ENABLE))
return 0;
return (ctl & INTEL_PT_CYC_THRESHOLD) >> INTEL_PT_CYC_THRESHOLD_SHIFT;
}
static void intel_pt_setup_period(struct intel_pt_decoder *decoder)
{
if (decoder->period_type == INTEL_PT_PERIOD_TICKS) {
uint64_t period;
period = intel_pt_lower_power_of_2(decoder->period);
decoder->period_mask = ~(period - 1);
decoder->period_ticks = period;
}
}
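/*
 * Compute t * n / d, splitting t into the quotient and remainder of the
 * division by d to reduce the risk of 64-bit overflow.
 */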
static uint64_t multdiv(uint64_t t, uint32_t n, uint32_t d)
{
if (!d)
return 0;
return (t / d) * n + ((t % d) * n) / d;
}
struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
{
struct intel_pt_decoder *decoder;
if (!params->get_trace || !params->walk_insn)
return NULL;
decoder = zalloc(sizeof(struct intel_pt_decoder));
if (!decoder)
return NULL;
decoder->get_trace = params->get_trace;
decoder->walk_insn = params->walk_insn;
decoder->pgd_ip = params->pgd_ip;
decoder->lookahead = params->lookahead;
decoder->findnew_vmcs_info = params->findnew_vmcs_info;
decoder->data = params->data;
decoder->return_compression = params->return_compression;
decoder->branch_enable = params->branch_enable;
decoder->hop = params->quick >= 1;
decoder->leap = params->quick >= 2;
decoder->vm_time_correlation = params->vm_time_correlation;
decoder->vm_tm_corr_dry_run = params->vm_tm_corr_dry_run;
decoder->first_timestamp = params->first_timestamp;
decoder->last_reliable_timestamp = params->first_timestamp;
decoder->max_loops = params->max_loops ? params->max_loops : INTEL_PT_MAX_LOOPS;
decoder->flags = params->flags;
decoder->ctl = params->ctl;
decoder->period = params->period;
decoder->period_type = params->period_type;
decoder->max_non_turbo_ratio = params->max_non_turbo_ratio;
decoder->max_non_turbo_ratio_fp = params->max_non_turbo_ratio;
decoder->cyc_threshold = intel_pt_cyc_threshold(decoder->ctl);
intel_pt_setup_period(decoder);
decoder->mtc_shift = params->mtc_period;
decoder->ctc_rem_mask = (1 << decoder->mtc_shift) - 1;
decoder->tsc_ctc_ratio_n = params->tsc_ctc_ratio_n;
decoder->tsc_ctc_ratio_d = params->tsc_ctc_ratio_d;
if (!decoder->tsc_ctc_ratio_n)
decoder->tsc_ctc_ratio_d = 0;
if (decoder->tsc_ctc_ratio_d) {
if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
decoder->tsc_ctc_ratio_d;
}
/*
* A TSC packet can slip past MTC packets so that the timestamp appears
* to go backwards. One estimate is that it can be up to about 40 CPU
* cycles, which is certainly less than 0x1000 TSC ticks, but accept
* slippage an order of magnitude more to be on the safe side.
*/
decoder->tsc_slip = 0x10000;
intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
intel_pt_log("timestamp: tsc_ctc_ratio_d %u\n", decoder->tsc_ctc_ratio_d);
intel_pt_log("timestamp: tsc_ctc_mult %u\n", decoder->tsc_ctc_mult);
intel_pt_log("timestamp: tsc_slip %#x\n", decoder->tsc_slip);
if (decoder->hop)
intel_pt_log("Hop mode: decoding FUP and TIPs, but not TNT\n");
return decoder;
}
void intel_pt_set_first_timestamp(struct intel_pt_decoder *decoder,
uint64_t first_timestamp)
{
decoder->first_timestamp = first_timestamp;
}
static void intel_pt_pop_blk(struct intel_pt_stack *stack)
{
struct intel_pt_blk *blk = stack->blk;
stack->blk = blk->prev;
if (!stack->spare)
stack->spare = blk;
else
free(blk);
}
static uint64_t intel_pt_pop(struct intel_pt_stack *stack)
{
if (!stack->pos) {
if (!stack->blk)
return 0;
intel_pt_pop_blk(stack);
if (!stack->blk)
return 0;
stack->pos = INTEL_PT_BLK_SIZE;
}
return stack->blk->ip[--stack->pos];
}
static int intel_pt_alloc_blk(struct intel_pt_stack *stack)
{
struct intel_pt_blk *blk;
if (stack->spare) {
blk = stack->spare;
stack->spare = NULL;
} else {
blk = malloc(sizeof(struct intel_pt_blk));
if (!blk)
return -ENOMEM;
}
blk->prev = stack->blk;
stack->blk = blk;
stack->pos = 0;
return 0;
}
static int intel_pt_push(struct intel_pt_stack *stack, uint64_t ip)
{
int err;
if (!stack->blk || stack->pos == INTEL_PT_BLK_SIZE) {
err = intel_pt_alloc_blk(stack);
if (err)
return err;
}
stack->blk->ip[stack->pos++] = ip;
return 0;
}
static void intel_pt_clear_stack(struct intel_pt_stack *stack)
{
while (stack->blk)
intel_pt_pop_blk(stack);
stack->pos = 0;
}
static void intel_pt_free_stack(struct intel_pt_stack *stack)
{
intel_pt_clear_stack(stack);
zfree(&stack->blk);
zfree(&stack->spare);
}
void intel_pt_decoder_free(struct intel_pt_decoder *decoder)
{
intel_pt_free_stack(&decoder->stack);
free(decoder);
}
static int intel_pt_ext_err(int code)
{
switch (code) {
case -ENOMEM:
return INTEL_PT_ERR_NOMEM;
case -ENOSYS:
return INTEL_PT_ERR_INTERN;
case -EBADMSG:
return INTEL_PT_ERR_BADPKT;
case -ENODATA:
return INTEL_PT_ERR_NODATA;
case -EILSEQ:
return INTEL_PT_ERR_NOINSN;
case -ENOENT:
return INTEL_PT_ERR_MISMAT;
case -EOVERFLOW:
return INTEL_PT_ERR_OVR;
case -ENOSPC:
return INTEL_PT_ERR_LOST;
case -ELOOP:
return INTEL_PT_ERR_NELOOP;
case -ECONNRESET:
return INTEL_PT_ERR_EPTW;
default:
return INTEL_PT_ERR_UNK;
}
}
static const char *intel_pt_err_msgs[] = {
[INTEL_PT_ERR_NOMEM] = "Memory allocation failed",
[INTEL_PT_ERR_INTERN] = "Internal error",
[INTEL_PT_ERR_BADPKT] = "Bad packet",
[INTEL_PT_ERR_NODATA] = "No more data",
[INTEL_PT_ERR_NOINSN] = "Failed to get instruction",
[INTEL_PT_ERR_MISMAT] = "Trace doesn't match instruction",
[INTEL_PT_ERR_OVR] = "Overflow packet",
[INTEL_PT_ERR_LOST] = "Lost trace data",
[INTEL_PT_ERR_UNK] = "Unknown error!",
[INTEL_PT_ERR_NELOOP] = "Never-ending loop (refer perf config intel-pt.max-loops)",
[INTEL_PT_ERR_EPTW] = "Broken emulated ptwrite",
};
int intel_pt__strerror(int code, char *buf, size_t buflen)
{
if (code < 1 || code >= INTEL_PT_ERR_MAX)
code = INTEL_PT_ERR_UNK;
strlcpy(buf, intel_pt_err_msgs[code], buflen);
return 0;
}
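/*
 * Reconstruct an IP from a compressed IP packet. The packet 'count' encodes
 * the compression type: 1 updates the low 16 bits, 2 the low 32 bits, 4 the
 * low 48 bits, 3 is a sign-extended 48-bit IP and 6 is a full 64-bit IP.
 */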
static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
uint64_t last_ip)
{
uint64_t ip;
switch (packet->count) {
case 1:
ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
packet->payload;
break;
case 2:
ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
packet->payload;
break;
case 3:
ip = packet->payload;
/* Sign-extend 6-byte ip */
if (ip & (uint64_t)0x800000000000ULL)
ip |= (uint64_t)0xffff000000000000ULL;
break;
case 4:
ip = (last_ip & (uint64_t)0xffff000000000000ULL) |
packet->payload;
break;
case 6:
ip = packet->payload;
break;
default:
return 0;
}
return ip;
}
static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
{
decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
decoder->have_last_ip = true;
}
static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
{
intel_pt_set_last_ip(decoder);
decoder->ip = decoder->last_ip;
}
static void intel_pt_decoder_log_packet(struct intel_pt_decoder *decoder)
{
intel_pt_log_packet(&decoder->packet, decoder->pkt_len, decoder->pos,
decoder->buf);
}
static int intel_pt_bug(struct intel_pt_decoder *decoder)
{
intel_pt_log("ERROR: Internal error\n");
decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
return -ENOSYS;
}
static inline void intel_pt_clear_tx_flags(struct intel_pt_decoder *decoder)
{
decoder->tx_flags = 0;
}
static inline void intel_pt_update_in_tx(struct intel_pt_decoder *decoder)
{
decoder->tx_flags = decoder->packet.payload & INTEL_PT_IN_TX;
}
static inline void intel_pt_update_pip(struct intel_pt_decoder *decoder)
{
decoder->pip_payload = decoder->packet.payload;
}
static inline void intel_pt_update_nr(struct intel_pt_decoder *decoder)
{
decoder->next_nr = decoder->pip_payload & 1;
}
static inline void intel_pt_set_nr(struct intel_pt_decoder *decoder)
{
decoder->nr = decoder->pip_payload & 1;
decoder->next_nr = decoder->nr;
}
static inline void intel_pt_set_pip(struct intel_pt_decoder *decoder)
{
intel_pt_update_pip(decoder);
intel_pt_set_nr(decoder);
}
static int intel_pt_bad_packet(struct intel_pt_decoder *decoder)
{
intel_pt_clear_tx_flags(decoder);
decoder->have_tma = false;
decoder->pkt_len = 1;
decoder->pkt_step = 1;
intel_pt_decoder_log_packet(decoder);
if (decoder->pkt_state != INTEL_PT_STATE_NO_PSB) {
intel_pt_log("ERROR: Bad packet\n");
decoder->pkt_state = INTEL_PT_STATE_ERR1;
}
return -EBADMSG;
}
static inline void intel_pt_update_sample_time(struct intel_pt_decoder *decoder)
{
decoder->sample_timestamp = decoder->timestamp;
decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
decoder->state.cycles = decoder->tot_cyc_cnt;
}
static void intel_pt_reposition(struct intel_pt_decoder *decoder)
{
decoder->ip = 0;
decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
decoder->timestamp = 0;
decoder->have_tma = false;
}
static int intel_pt_get_data(struct intel_pt_decoder *decoder, bool reposition)
{
struct intel_pt_buffer buffer = { .buf = 0, };
int ret;
decoder->pkt_step = 0;
intel_pt_log("Getting more data\n");
ret = decoder->get_trace(&buffer, decoder->data);
if (ret)
return ret;
decoder->buf = buffer.buf;
decoder->len = buffer.len;
if (!decoder->len) {
intel_pt_log("No more data\n");
return -ENODATA;
}
decoder->buf_timestamp = buffer.ref_timestamp;
if (!buffer.consecutive || reposition) {
intel_pt_reposition(decoder);
decoder->ref_timestamp = buffer.ref_timestamp;
decoder->state.trace_nr = buffer.trace_nr;
decoder->vm_tm_corr_same_buf = false;
intel_pt_log("Reference timestamp 0x%" PRIx64 "\n",
decoder->ref_timestamp);
return -ENOLINK;
}
return 0;
}
static int intel_pt_get_next_data(struct intel_pt_decoder *decoder,
bool reposition)
{
if (!decoder->next_buf)
return intel_pt_get_data(decoder, reposition);
decoder->buf = decoder->next_buf;
decoder->len = decoder->next_len;
decoder->next_buf = 0;
decoder->next_len = 0;
return 0;
}
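/*
 * Decode a packet that straddles two buffers by copying the tail of the old
 * buffer and the head of the new one into temp_buf.
 */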
static int intel_pt_get_split_packet(struct intel_pt_decoder *decoder)
{
unsigned char *buf = decoder->temp_buf;
size_t old_len, len, n;
int ret;
old_len = decoder->len;
len = decoder->len;
memcpy(buf, decoder->buf, len);
ret = intel_pt_get_data(decoder, false);
if (ret) {
decoder->pos += old_len;
return ret < 0 ? ret : -EINVAL;
}
n = INTEL_PT_PKT_MAX_SZ - len;
if (n > decoder->len)
n = decoder->len;
memcpy(buf + len, decoder->buf, n);
len += n;
decoder->prev_pkt_ctx = decoder->pkt_ctx;
ret = intel_pt_get_packet(buf, len, &decoder->packet, &decoder->pkt_ctx);
if (ret < (int)old_len) {
decoder->next_buf = decoder->buf;
decoder->next_len = decoder->len;
decoder->buf = buf;
decoder->len = old_len;
return intel_pt_bad_packet(decoder);
}
decoder->next_buf = decoder->buf + (ret - old_len);
decoder->next_len = decoder->len - (ret - old_len);
decoder->buf = buf;
decoder->len = ret;
return ret;
}
struct intel_pt_pkt_info {
struct intel_pt_decoder *decoder;
struct intel_pt_pkt packet;
uint64_t pos;
int pkt_len;
int last_packet_type;
void *data;
};
typedef int (*intel_pt_pkt_cb_t)(struct intel_pt_pkt_info *pkt_info);
/* Lookahead packets in current buffer */
static int intel_pt_pkt_lookahead(struct intel_pt_decoder *decoder,
intel_pt_pkt_cb_t cb, void *data)
{
struct intel_pt_pkt_info pkt_info;
const unsigned char *buf = decoder->buf;
enum intel_pt_pkt_ctx pkt_ctx = decoder->pkt_ctx;
size_t len = decoder->len;
int ret;
pkt_info.decoder = decoder;
pkt_info.pos = decoder->pos;
pkt_info.pkt_len = decoder->pkt_step;
pkt_info.last_packet_type = decoder->last_packet_type;
pkt_info.data = data;
while (1) {
do {
pkt_info.pos += pkt_info.pkt_len;
buf += pkt_info.pkt_len;
len -= pkt_info.pkt_len;
if (!len)
return INTEL_PT_NEED_MORE_BYTES;
ret = intel_pt_get_packet(buf, len, &pkt_info.packet,
&pkt_ctx);
if (!ret)
return INTEL_PT_NEED_MORE_BYTES;
if (ret < 0)
return ret;
pkt_info.pkt_len = ret;
} while (pkt_info.packet.type == INTEL_PT_PAD);
ret = cb(&pkt_info);
if (ret)
return 0;
pkt_info.last_packet_type = pkt_info.packet.type;
}
}
struct intel_pt_calc_cyc_to_tsc_info {
uint64_t cycle_cnt;
unsigned int cbr;
uint32_t last_mtc;
uint64_t ctc_timestamp;
uint64_t ctc_delta;
uint64_t tsc_timestamp;
uint64_t timestamp;
bool have_tma;
bool fixup_last_mtc;
bool from_mtc;
double cbr_cyc_to_tsc;
};
/*
* MTC provides an 8-bit slice of CTC but the TMA packet only provides the lower
* 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC
* provided by the TMA packet. Fix-up the last_mtc calculated from the TMA
* packet by copying the missing bits from the current MTC assuming the least
* difference between the two, and that the current MTC comes after last_mtc.
*/
static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
uint32_t *last_mtc)
{
uint32_t first_missing_bit = 1U << (16 - mtc_shift);
uint32_t mask = ~(first_missing_bit - 1);
*last_mtc |= mtc & mask;
if (*last_mtc >= mtc) {
*last_mtc -= first_missing_bit;
*last_mtc &= 0xff;
}
}
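/*
 * Lookahead callback used to estimate TSC ticks per CYC: accumulate CYC
 * counts until an MTC or TSC packet provides a timestamp, then sanity-check
 * the resulting ratio against the CBR-based value.
 */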
static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
{
struct intel_pt_decoder *decoder = pkt_info->decoder;
struct intel_pt_calc_cyc_to_tsc_info *data = pkt_info->data;
uint64_t timestamp;
double cyc_to_tsc;
unsigned int cbr;
uint32_t mtc, mtc_delta, ctc, fc, ctc_rem;
switch (pkt_info->packet.type) {
case INTEL_PT_TNT:
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
case INTEL_PT_FUP:
case INTEL_PT_PSB:
case INTEL_PT_PIP:
case INTEL_PT_MODE_EXEC:
case INTEL_PT_MODE_TSX:
case INTEL_PT_PSBEND:
case INTEL_PT_PAD:
case INTEL_PT_VMCS:
case INTEL_PT_MNT:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_BBP:
case INTEL_PT_BIP:
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
return 0;
case INTEL_PT_MTC:
if (!data->have_tma)
return 0;
mtc = pkt_info->packet.payload;
if (decoder->mtc_shift > 8 && data->fixup_last_mtc) {
data->fixup_last_mtc = false;
intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
&data->last_mtc);
}
if (mtc > data->last_mtc)
mtc_delta = mtc - data->last_mtc;
else
mtc_delta = mtc + 256 - data->last_mtc;
data->ctc_delta += mtc_delta << decoder->mtc_shift;
data->last_mtc = mtc;
if (decoder->tsc_ctc_mult) {
timestamp = data->ctc_timestamp +
data->ctc_delta * decoder->tsc_ctc_mult;
} else {
timestamp = data->ctc_timestamp +
multdiv(data->ctc_delta,
decoder->tsc_ctc_ratio_n,
decoder->tsc_ctc_ratio_d);
}
if (timestamp < data->timestamp)
return 1;
if (pkt_info->last_packet_type != INTEL_PT_CYC) {
data->timestamp = timestamp;
return 0;
}
break;
case INTEL_PT_TSC:
/*
* For now, do not support using TSC packets - refer
* intel_pt_calc_cyc_to_tsc().
*/
if (data->from_mtc)
return 1;
timestamp = pkt_info->packet.payload |
(data->timestamp & (0xffULL << 56));
if (data->from_mtc && timestamp < data->timestamp &&
data->timestamp - timestamp < decoder->tsc_slip)
return 1;
if (timestamp < data->timestamp)
timestamp += (1ULL << 56);
if (pkt_info->last_packet_type != INTEL_PT_CYC) {
if (data->from_mtc)
return 1;
data->tsc_timestamp = timestamp;
data->timestamp = timestamp;
return 0;
}
break;
case INTEL_PT_TMA:
if (data->from_mtc)
return 1;
if (!decoder->tsc_ctc_ratio_d)
return 0;
ctc = pkt_info->packet.payload;
fc = pkt_info->packet.count;
ctc_rem = ctc & decoder->ctc_rem_mask;
data->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
data->ctc_timestamp = data->tsc_timestamp - fc;
if (decoder->tsc_ctc_mult) {
data->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
} else {
data->ctc_timestamp -=
multdiv(ctc_rem, decoder->tsc_ctc_ratio_n,
decoder->tsc_ctc_ratio_d);
}
data->ctc_delta = 0;
data->have_tma = true;
data->fixup_last_mtc = true;
return 0;
case INTEL_PT_CYC:
data->cycle_cnt += pkt_info->packet.payload;
return 0;
case INTEL_PT_CBR:
cbr = pkt_info->packet.payload;
if (data->cbr && data->cbr != cbr)
return 1;
data->cbr = cbr;
data->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
return 0;
case INTEL_PT_TIP_PGD:
case INTEL_PT_TRACESTOP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_OVF:
case INTEL_PT_BAD: /* Does not happen */
default:
return 1;
}
if (!data->cbr && decoder->cbr) {
data->cbr = decoder->cbr;
data->cbr_cyc_to_tsc = decoder->cbr_cyc_to_tsc;
}
if (!data->cycle_cnt)
return 1;
cyc_to_tsc = (double)(timestamp - decoder->timestamp) / data->cycle_cnt;
if (data->cbr && cyc_to_tsc > data->cbr_cyc_to_tsc &&
cyc_to_tsc / data->cbr_cyc_to_tsc > 1.25) {
intel_pt_log("Timestamp: calculated %g TSC ticks per cycle too big (c.f. CBR-based value %g), pos " x64_fmt "\n",
cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos);
return 1;
}
decoder->calc_cyc_to_tsc = cyc_to_tsc;
decoder->have_calc_cyc_to_tsc = true;
if (data->cbr) {
intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. CBR-based value %g, pos " x64_fmt "\n",
cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos);
} else {
intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. unknown CBR-based value, pos " x64_fmt "\n",
cyc_to_tsc, pkt_info->pos);
}
return 1;
}
static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
bool from_mtc)
{
struct intel_pt_calc_cyc_to_tsc_info data = {
.cycle_cnt = 0,
.cbr = 0,
.last_mtc = decoder->last_mtc,
.ctc_timestamp = decoder->ctc_timestamp,
.ctc_delta = decoder->ctc_delta,
.tsc_timestamp = decoder->tsc_timestamp,
.timestamp = decoder->timestamp,
.have_tma = decoder->have_tma,
.fixup_last_mtc = decoder->fixup_last_mtc,
.from_mtc = from_mtc,
.cbr_cyc_to_tsc = 0,
};
/*
* For now, do not support using TSC packets for at least the reasons:
* 1) timing might have stopped
* 2) TSC packets within PSB+ can slip against CYC packets
*/
if (!from_mtc)
return;
intel_pt_pkt_lookahead(decoder, intel_pt_calc_cyc_cb, &data);
}
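/*
 * Advance to the next packet, fetching more trace data or stitching together
 * a split packet when the current buffer runs out, and skipping PAD packets.
 */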
static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder)
{
int ret;
decoder->last_packet_type = decoder->packet.type;
do {
decoder->pos += decoder->pkt_step;
decoder->buf += decoder->pkt_step;
decoder->len -= decoder->pkt_step;
if (!decoder->len) {
ret = intel_pt_get_next_data(decoder, false);
if (ret)
return ret;
}
decoder->prev_pkt_ctx = decoder->pkt_ctx;
ret = intel_pt_get_packet(decoder->buf, decoder->len,
&decoder->packet, &decoder->pkt_ctx);
if (ret == INTEL_PT_NEED_MORE_BYTES && BITS_PER_LONG == 32 &&
decoder->len < INTEL_PT_PKT_MAX_SZ && !decoder->next_buf) {
ret = intel_pt_get_split_packet(decoder);
if (ret < 0)
return ret;
}
if (ret <= 0)
return intel_pt_bad_packet(decoder);
decoder->pkt_len = ret;
decoder->pkt_step = ret;
intel_pt_decoder_log_packet(decoder);
} while (decoder->packet.type == INTEL_PT_PAD);
return 0;
}
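/*
 * For tick-based sampling, return the number of ticks (approximated as one
 * per instruction) remaining before the next period boundary.
 */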
static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
{
uint64_t timestamp, masked_timestamp;
timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
masked_timestamp = timestamp & decoder->period_mask;
if (decoder->continuous_period) {
if (masked_timestamp > decoder->last_masked_timestamp)
return 1;
} else {
timestamp += 1;
masked_timestamp = timestamp & decoder->period_mask;
if (masked_timestamp > decoder->last_masked_timestamp) {
decoder->last_masked_timestamp = masked_timestamp;
decoder->continuous_period = true;
}
}
if (masked_timestamp < decoder->last_masked_timestamp)
return decoder->period_ticks;
return decoder->period_ticks - (timestamp - masked_timestamp);
}
static uint64_t intel_pt_next_sample(struct intel_pt_decoder *decoder)
{
switch (decoder->period_type) {
case INTEL_PT_PERIOD_INSTRUCTIONS:
return decoder->period - decoder->period_insn_cnt;
case INTEL_PT_PERIOD_TICKS:
return intel_pt_next_period(decoder);
case INTEL_PT_PERIOD_NONE:
case INTEL_PT_PERIOD_MTC:
default:
return 0;
}
}
static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
{
uint64_t timestamp, masked_timestamp;
switch (decoder->period_type) {
case INTEL_PT_PERIOD_INSTRUCTIONS:
decoder->period_insn_cnt = 0;
break;
case INTEL_PT_PERIOD_TICKS:
timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
masked_timestamp = timestamp & decoder->period_mask;
if (masked_timestamp > decoder->last_masked_timestamp)
decoder->last_masked_timestamp = masked_timestamp;
else
decoder->last_masked_timestamp += decoder->period_ticks;
break;
case INTEL_PT_PERIOD_NONE:
case INTEL_PT_PERIOD_MTC:
default:
break;
}
decoder->state.type |= INTEL_PT_INSTRUCTION;
}
/*
* Sample FUP instruction at the same time as reporting the FUP event, so the
* instruction sample gets the same flags as the FUP event.
*/
static void intel_pt_sample_fup_insn(struct intel_pt_decoder *decoder)
{
struct intel_pt_insn intel_pt_insn;
uint64_t max_insn_cnt, insn_cnt = 0;
int err;
decoder->state.insn_op = INTEL_PT_OP_OTHER;
decoder->state.insn_len = 0;
if (!decoder->branch_enable || !decoder->pge || decoder->hop ||
decoder->ip != decoder->last_ip)
return;
if (!decoder->mtc_insn)
decoder->mtc_insn = true;
max_insn_cnt = intel_pt_next_sample(decoder);
if (max_insn_cnt != 1)
return;
err = decoder->walk_insn(&intel_pt_insn, &insn_cnt, &decoder->ip,
0, max_insn_cnt, decoder->data);
/* Ignore error, it will be reported next walk anyway */
if (err)
return;
if (intel_pt_insn.branch != INTEL_PT_BR_NO_BRANCH) {
intel_pt_log_at("ERROR: Unexpected branch at FUP instruction", decoder->ip);
return;
}
decoder->tot_insn_cnt += insn_cnt;
decoder->timestamp_insn_cnt += insn_cnt;
decoder->sample_insn_cnt += insn_cnt;
decoder->period_insn_cnt += insn_cnt;
intel_pt_sample_insn(decoder);
decoder->state.type |= INTEL_PT_INSTRUCTION;
decoder->ip += intel_pt_insn.length;
}
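/*
 * Walk instructions from the current IP until a branch, the optional target
 * 'ip', or the sampling limit is reached, maintaining the call/return stack
 * and detecting never-ending loops at unconditional branches.
 */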
static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
struct intel_pt_insn *intel_pt_insn, uint64_t ip)
{
uint64_t max_insn_cnt, insn_cnt = 0;
int err;
if (!decoder->mtc_insn)
decoder->mtc_insn = true;
max_insn_cnt = intel_pt_next_sample(decoder);
err = decoder->walk_insn(intel_pt_insn, &insn_cnt, &decoder->ip, ip,
max_insn_cnt, decoder->data);
decoder->tot_insn_cnt += insn_cnt;
decoder->timestamp_insn_cnt += insn_cnt;
decoder->sample_insn_cnt += insn_cnt;
decoder->period_insn_cnt += insn_cnt;
if (err) {
decoder->no_progress = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR2;
intel_pt_log_at("ERROR: Failed to get instruction",
decoder->ip);
if (err == -ENOENT)
return -ENOLINK;
return -EILSEQ;
}
if (ip && decoder->ip == ip) {
err = -EAGAIN;
goto out;
}
if (max_insn_cnt && insn_cnt >= max_insn_cnt)
intel_pt_sample_insn(decoder);
if (intel_pt_insn->branch == INTEL_PT_BR_NO_BRANCH) {
decoder->state.type = INTEL_PT_INSTRUCTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->ip += intel_pt_insn->length;
err = INTEL_PT_RETURN;
goto out;
}
if (intel_pt_insn->op == INTEL_PT_OP_CALL) {
/* Zero-length calls are excluded */
if (intel_pt_insn->branch != INTEL_PT_BR_UNCONDITIONAL ||
intel_pt_insn->rel) {
err = intel_pt_push(&decoder->stack, decoder->ip +
intel_pt_insn->length);
if (err)
goto out;
}
} else if (intel_pt_insn->op == INTEL_PT_OP_RET) {
decoder->ret_addr = intel_pt_pop(&decoder->stack);
}
if (intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL) {
int cnt = decoder->no_progress++;
decoder->state.from_ip = decoder->ip;
decoder->ip += intel_pt_insn->length +
intel_pt_insn->rel;
decoder->state.to_ip = decoder->ip;
err = INTEL_PT_RETURN;
/*
* Check for being stuck in a loop. This can happen if a
* decoder error results in the decoder erroneously setting the
* ip to an address that is itself in an infinite loop that
* consumes no packets. When that happens, there must be an
* unconditional branch.
*/
if (cnt) {
if (cnt == 1) {
decoder->stuck_ip = decoder->state.to_ip;
decoder->stuck_ip_prd = 1;
decoder->stuck_ip_cnt = 1;
} else if (cnt > decoder->max_loops ||
decoder->state.to_ip == decoder->stuck_ip) {
intel_pt_log_at("ERROR: Never-ending loop",
decoder->state.to_ip);
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
err = -ELOOP;
goto out;
} else if (!--decoder->stuck_ip_cnt) {
decoder->stuck_ip_prd += 1;
decoder->stuck_ip_cnt = decoder->stuck_ip_prd;
decoder->stuck_ip = decoder->state.to_ip;
}
}
goto out_no_progress;
}
out:
decoder->no_progress = 0;
out_no_progress:
decoder->state.insn_op = intel_pt_insn->op;
decoder->state.insn_len = intel_pt_insn->length;
memcpy(decoder->state.insn, intel_pt_insn->buf,
INTEL_PT_INSN_BUF_SZ);
if (decoder->tx_flags & INTEL_PT_IN_TX)
decoder->state.flags |= INTEL_PT_IN_TX;
return err;
}
static void intel_pt_mode_exec_status(struct intel_pt_decoder *decoder)
{
bool iflag = decoder->packet.count & INTEL_PT_IFLAG;
decoder->exec_mode = decoder->packet.payload;
decoder->iflag = iflag;
decoder->next_iflag = iflag;
decoder->state.from_iflag = iflag;
decoder->state.to_iflag = iflag;
}
static void intel_pt_mode_exec(struct intel_pt_decoder *decoder)
{
bool iflag = decoder->packet.count & INTEL_PT_IFLAG;
decoder->exec_mode = decoder->packet.payload;
decoder->next_iflag = iflag;
}
static void intel_pt_sample_iflag(struct intel_pt_decoder *decoder)
{
decoder->state.type |= INTEL_PT_IFLAG_CHG;
decoder->state.from_iflag = decoder->iflag;
decoder->state.to_iflag = decoder->next_iflag;
decoder->iflag = decoder->next_iflag;
}
static void intel_pt_sample_iflag_chg(struct intel_pt_decoder *decoder)
{
if (decoder->iflag != decoder->next_iflag)
intel_pt_sample_iflag(decoder);
}
static void intel_pt_clear_fup_event(struct intel_pt_decoder *decoder)
{
decoder->set_fup_tx_flags = false;
decoder->set_fup_ptw = false;
decoder->set_fup_mwait = false;
decoder->set_fup_pwre = false;
decoder->set_fup_exstop = false;
decoder->set_fup_bep = false;
decoder->set_fup_cfe_ip = false;
decoder->set_fup_cfe = false;
decoder->evd_cnt = 0;
decoder->set_fup_mode_exec = false;
decoder->iflag = decoder->next_iflag;
}
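/*
 * Emit event state that was deferred until the FUP IP was known (TSX,
 * ptwrite, mwait, power, exstop, block items, CFE) and deal with a pending
 * overflow. Returns true if there is an event to deliver.
 */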
static bool intel_pt_fup_event(struct intel_pt_decoder *decoder, bool no_tip)
{
enum intel_pt_sample_type type = decoder->state.type;
bool sample_fup_insn = false;
bool ret = false;
decoder->state.type &= ~INTEL_PT_BRANCH;
if (decoder->set_fup_cfe_ip || decoder->set_fup_cfe) {
bool ip = decoder->set_fup_cfe_ip;
decoder->set_fup_cfe_ip = false;
decoder->set_fup_cfe = false;
decoder->state.type |= INTEL_PT_EVT;
if (!ip && decoder->pge)
decoder->state.type |= INTEL_PT_BRANCH;
decoder->state.cfe_type = decoder->fup_cfe_pkt.count;
decoder->state.cfe_vector = decoder->fup_cfe_pkt.payload;
decoder->state.evd_cnt = decoder->evd_cnt;
decoder->state.evd = decoder->evd;
decoder->evd_cnt = 0;
if (ip || decoder->pge)
decoder->state.flags |= INTEL_PT_FUP_IP;
ret = true;
}
if (decoder->set_fup_mode_exec) {
decoder->set_fup_mode_exec = false;
intel_pt_sample_iflag(decoder);
sample_fup_insn = no_tip;
ret = true;
}
if (decoder->set_fup_tx_flags) {
decoder->set_fup_tx_flags = false;
decoder->tx_flags = decoder->fup_tx_flags;
decoder->state.type |= INTEL_PT_TRANSACTION;
if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
decoder->state.type |= INTEL_PT_BRANCH;
decoder->state.flags = decoder->fup_tx_flags;
ret = true;
}
if (decoder->set_fup_ptw) {
decoder->set_fup_ptw = false;
decoder->state.type |= INTEL_PT_PTW;
decoder->state.flags |= INTEL_PT_FUP_IP;
decoder->state.ptw_payload = decoder->fup_ptw_payload;
ret = true;
}
if (decoder->set_fup_mwait) {
decoder->set_fup_mwait = false;
decoder->state.type |= INTEL_PT_MWAIT_OP;
decoder->state.mwait_payload = decoder->fup_mwait_payload;
ret = true;
}
if (decoder->set_fup_pwre) {
decoder->set_fup_pwre = false;
decoder->state.type |= INTEL_PT_PWR_ENTRY;
decoder->state.pwre_payload = decoder->fup_pwre_payload;
ret = true;
}
if (decoder->set_fup_exstop) {
decoder->set_fup_exstop = false;
decoder->state.type |= INTEL_PT_EX_STOP;
decoder->state.flags |= INTEL_PT_FUP_IP;
ret = true;
}
if (decoder->set_fup_bep) {
decoder->set_fup_bep = false;
decoder->state.type |= INTEL_PT_BLK_ITEMS;
ret = true;
}
if (decoder->overflow) {
decoder->overflow = false;
if (!ret && !decoder->pge) {
if (decoder->hop) {
decoder->state.type = 0;
decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
}
decoder->pge = true;
decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN;
decoder->state.from_ip = 0;
decoder->state.to_ip = decoder->ip;
return true;
}
}
if (ret) {
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
if (sample_fup_insn)
intel_pt_sample_fup_insn(decoder);
} else {
decoder->state.type = type;
}
return ret;
}
static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
struct intel_pt_insn *intel_pt_insn,
uint64_t ip, int err)
{
return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
ip == decoder->ip + intel_pt_insn->length;
}
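/* Walk instructions up to the IP given by a FUP packet. */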
static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
{
struct intel_pt_insn intel_pt_insn;
uint64_t ip;
int err;
ip = decoder->last_ip;
while (1) {
err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
if (err == INTEL_PT_RETURN)
return 0;
if (err == -EAGAIN ||
intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
bool no_tip = decoder->pkt_state != INTEL_PT_STATE_FUP;
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
if (intel_pt_fup_event(decoder, no_tip) && no_tip)
return 0;
return -EAGAIN;
}
decoder->set_fup_tx_flags = false;
if (err)
return err;
if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
intel_pt_log_at("ERROR: Unexpected indirect branch",
decoder->ip);
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
return -ENOENT;
}
if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
intel_pt_log_at("ERROR: Unexpected conditional branch",
decoder->ip);
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
return -ENOENT;
}
intel_pt_bug(decoder);
}
}
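/* Walk instructions up to the indirect branch resolved by a TIP or TIP.PGD packet. */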
static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
{
struct intel_pt_insn intel_pt_insn;
int err;
err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
if (err == INTEL_PT_RETURN &&
decoder->pgd_ip &&
decoder->pkt_state == INTEL_PT_STATE_TIP_PGD &&
(decoder->state.type & INTEL_PT_BRANCH) &&
decoder->pgd_ip(decoder->state.to_ip, decoder->data)) {
/* Unconditional branch leaving filter region */
decoder->no_progress = 0;
decoder->pge = false;
decoder->continuous_period = false;
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->state.type |= INTEL_PT_TRACE_END;
intel_pt_update_nr(decoder);
return 0;
}
if (err == INTEL_PT_RETURN)
return 0;
if (err)
return err;
intel_pt_update_nr(decoder);
intel_pt_sample_iflag_chg(decoder);
if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
if (decoder->pkt_state == INTEL_PT_STATE_TIP_PGD) {
decoder->pge = false;
decoder->continuous_period = false;
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->state.from_ip = decoder->ip;
if (decoder->packet.count == 0) {
decoder->state.to_ip = 0;
} else {
decoder->state.to_ip = decoder->last_ip;
decoder->ip = decoder->last_ip;
}
decoder->state.type |= INTEL_PT_TRACE_END;
} else {
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->state.from_ip = decoder->ip;
if (decoder->packet.count == 0) {
decoder->state.to_ip = 0;
} else {
decoder->state.to_ip = decoder->last_ip;
decoder->ip = decoder->last_ip;
}
}
return 0;
}
if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
uint64_t to_ip = decoder->ip + intel_pt_insn.length +
intel_pt_insn.rel;
if (decoder->pgd_ip &&
decoder->pkt_state == INTEL_PT_STATE_TIP_PGD &&
decoder->pgd_ip(to_ip, decoder->data)) {
/* Conditional branch leaving filter region */
decoder->pge = false;
decoder->continuous_period = false;
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->ip = to_ip;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = to_ip;
decoder->state.type |= INTEL_PT_TRACE_END;
return 0;
}
intel_pt_log_at("ERROR: Conditional branch when expecting indirect branch",
decoder->ip);
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
return -ENOENT;
}
return intel_pt_bug(decoder);
}
struct eptw_data {
int bit_countdown;
uint64_t payload;
};
static int intel_pt_eptw_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
{
struct eptw_data *data = pkt_info->data;
int nr_bits;
switch (pkt_info->packet.type) {
case INTEL_PT_PAD:
case INTEL_PT_MNT:
case INTEL_PT_MODE_EXEC:
case INTEL_PT_MODE_TSX:
case INTEL_PT_MTC:
case INTEL_PT_FUP:
case INTEL_PT_CYC:
case INTEL_PT_CBR:
case INTEL_PT_TSC:
case INTEL_PT_TMA:
case INTEL_PT_PIP:
case INTEL_PT_VMCS:
case INTEL_PT_PSB:
case INTEL_PT_PSBEND:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_BBP:
case INTEL_PT_BIP:
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
break;
case INTEL_PT_TNT:
nr_bits = data->bit_countdown;
if (nr_bits > pkt_info->packet.count)
nr_bits = pkt_info->packet.count;
data->payload <<= nr_bits;
data->payload |= pkt_info->packet.payload >> (64 - nr_bits);
data->bit_countdown -= nr_bits;
return !data->bit_countdown;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP_PGD:
case INTEL_PT_TIP:
case INTEL_PT_BAD:
case INTEL_PT_OVF:
case INTEL_PT_TRACESTOP:
default:
return 1;
}
return 0;
}
static int intel_pt_emulated_ptwrite(struct intel_pt_decoder *decoder)
{
int n = 64 - decoder->tnt.count;
struct eptw_data data = {
.bit_countdown = n,
.payload = decoder->tnt.payload >> n,
};
decoder->emulated_ptwrite = false;
intel_pt_log("Emulated ptwrite detected\n");
intel_pt_pkt_lookahead(decoder, intel_pt_eptw_lookahead_cb, &data);
if (data.bit_countdown)
return -ECONNRESET;
decoder->state.type = INTEL_PT_PTW;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.ptw_payload = data.payload;
return 0;
}
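/*
 * Consume TNT (taken / not-taken) bits: follow conditional branches and
 * compressed returns until the TNT payload is exhausted or an indirect branch
 * defers to the next TIP packet.
 */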
static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
{
struct intel_pt_insn intel_pt_insn;
int err;
while (1) {
if (decoder->emulated_ptwrite)
return intel_pt_emulated_ptwrite(decoder);
err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
if (err == INTEL_PT_RETURN) {
decoder->emulated_ptwrite = intel_pt_insn.emulated_ptwrite;
return 0;
}
if (err) {
decoder->emulated_ptwrite = false;
return err;
}
if (intel_pt_insn.op == INTEL_PT_OP_RET) {
if (!decoder->return_compression) {
intel_pt_log_at("ERROR: RET when expecting conditional branch",
decoder->ip);
decoder->pkt_state = INTEL_PT_STATE_ERR3;
return -ENOENT;
}
if (!decoder->ret_addr) {
intel_pt_log_at("ERROR: Bad RET compression (stack empty)",
decoder->ip);
decoder->pkt_state = INTEL_PT_STATE_ERR3;
return -ENOENT;
}
if (!(decoder->tnt.payload & BIT63)) {
intel_pt_log_at("ERROR: Bad RET compression (TNT=N)",
decoder->ip);
decoder->pkt_state = INTEL_PT_STATE_ERR3;
return -ENOENT;
}
decoder->tnt.count -= 1;
if (decoder->tnt.count)
decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->tnt.payload <<= 1;
decoder->state.from_ip = decoder->ip;
decoder->ip = decoder->ret_addr;
decoder->state.to_ip = decoder->ip;
return 0;
}
if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
/* Handle deferred TIPs */
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
if (decoder->packet.type != INTEL_PT_TIP ||
decoder->packet.count == 0) {
intel_pt_log_at("ERROR: Missing deferred TIP for indirect branch",
decoder->ip);
decoder->pkt_state = INTEL_PT_STATE_ERR3;
decoder->pkt_step = 0;
return -ENOENT;
}
intel_pt_set_last_ip(decoder);
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = decoder->last_ip;
decoder->ip = decoder->last_ip;
intel_pt_update_nr(decoder);
intel_pt_sample_iflag_chg(decoder);
return 0;
}
if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
decoder->tnt.count -= 1;
if (decoder->tnt.count)
decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
if (decoder->tnt.payload & BIT63) {
decoder->tnt.payload <<= 1;
decoder->state.from_ip = decoder->ip;
decoder->ip += intel_pt_insn.length +
intel_pt_insn.rel;
decoder->state.to_ip = decoder->ip;
return 0;
}
/* Instruction sample for a non-taken branch */
if (decoder->state.type & INTEL_PT_INSTRUCTION) {
decoder->tnt.payload <<= 1;
decoder->state.type = INTEL_PT_INSTRUCTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->ip += intel_pt_insn.length;
return 0;
}
decoder->sample_cyc = false;
decoder->ip += intel_pt_insn.length;
if (!decoder->tnt.count) {
intel_pt_update_sample_time(decoder);
return -EAGAIN;
}
decoder->tnt.payload <<= 1;
continue;
}
return intel_pt_bug(decoder);
}
}
static int intel_pt_mode_tsx(struct intel_pt_decoder *decoder, bool *no_tip)
{
unsigned int fup_tx_flags;
int err;
fup_tx_flags = decoder->packet.payload &
(INTEL_PT_IN_TX | INTEL_PT_ABORT_TX);
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
if (decoder->packet.type == INTEL_PT_FUP) {
decoder->fup_tx_flags = fup_tx_flags;
decoder->set_fup_tx_flags = true;
if (!(decoder->fup_tx_flags & INTEL_PT_ABORT_TX))
*no_tip = true;
} else {
intel_pt_log_at("ERROR: Missing FUP after MODE.TSX",
decoder->pos);
intel_pt_update_in_tx(decoder);
}
return 0;
}
static int intel_pt_evd(struct intel_pt_decoder *decoder)
{
if (decoder->evd_cnt >= INTEL_PT_MAX_EVDS) {
intel_pt_log_at("ERROR: Too many EVD packets", decoder->pos);
return -ENOSYS;
}
decoder->evd[decoder->evd_cnt++] = (struct intel_pt_evd){
.type = decoder->packet.count,
.payload = decoder->packet.payload,
};
return 0;
}
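/*
 * A TSC packet carries only 7 bytes of TSC. Infer the missing high byte by
 * choosing the value closest to ref_timestamp, allowing for wraparound in
 * either direction.
 */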
static uint64_t intel_pt_8b_tsc(uint64_t timestamp, uint64_t ref_timestamp)
{
timestamp |= (ref_timestamp & (0xffULL << 56));
if (timestamp < ref_timestamp) {
if (ref_timestamp - timestamp > (1ULL << 55))
timestamp += (1ULL << 56);
} else {
if (timestamp - ref_timestamp > (1ULL << 55))
timestamp -= (1ULL << 56);
}
return timestamp;
}
/* For use only when decoder->vm_time_correlation is true */
static bool intel_pt_time_in_range(struct intel_pt_decoder *decoder,
uint64_t timestamp)
{
uint64_t max_timestamp = decoder->buf_timestamp;
if (!max_timestamp) {
max_timestamp = decoder->last_reliable_timestamp +
0x400000000ULL;
}
return timestamp >= decoder->last_reliable_timestamp &&
timestamp < max_timestamp;
}
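/*
 * Update the timestamp from a TSC packet, suppressing small backward slips
 * and handling 7-byte wraparound.
 */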
static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
{
uint64_t timestamp;
bool bad = false;
decoder->have_tma = false;
if (decoder->ref_timestamp) {
timestamp = intel_pt_8b_tsc(decoder->packet.payload,
decoder->ref_timestamp);
decoder->tsc_timestamp = timestamp;
decoder->timestamp = timestamp;
decoder->ref_timestamp = 0;
decoder->timestamp_insn_cnt = 0;
} else if (decoder->timestamp) {
timestamp = decoder->packet.payload |
(decoder->timestamp & (0xffULL << 56));
decoder->tsc_timestamp = timestamp;
if (timestamp < decoder->timestamp &&
decoder->timestamp - timestamp < decoder->tsc_slip) {
intel_pt_log_to("Suppressing backwards timestamp",
timestamp);
timestamp = decoder->timestamp;
}
if (timestamp < decoder->timestamp) {
if (!decoder->buf_timestamp ||
(timestamp + (1ULL << 56) < decoder->buf_timestamp)) {
intel_pt_log_to("Wraparound timestamp", timestamp);
timestamp += (1ULL << 56);
decoder->tsc_timestamp = timestamp;
} else {
intel_pt_log_to("Suppressing bad timestamp", timestamp);
timestamp = decoder->timestamp;
bad = true;
}
}
if (decoder->vm_time_correlation &&
(bad || !intel_pt_time_in_range(decoder, timestamp)) &&
intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
p_log("Timestamp out of range");
decoder->timestamp = timestamp;
decoder->timestamp_insn_cnt = 0;
}
if (decoder->last_packet_type == INTEL_PT_CYC) {
decoder->cyc_ref_timestamp = decoder->timestamp;
decoder->cycle_cnt = 0;
decoder->have_calc_cyc_to_tsc = false;
intel_pt_calc_cyc_to_tsc(decoder, false);
}
intel_pt_log_to("Setting timestamp", decoder->timestamp);
}
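/*
 * Handle an OVF packet: trace data was lost, so reset transaction and FUP
 * state, note the overflow and report it to the caller.
 */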
static int intel_pt_overflow(struct intel_pt_decoder *decoder)
{
intel_pt_log("ERROR: Buffer overflow\n");
intel_pt_clear_tx_flags(decoder);
intel_pt_set_nr(decoder);
decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->state.from_ip = decoder->ip;
decoder->ip = 0;
decoder->pge = false;
intel_pt_clear_fup_event(decoder);
decoder->overflow = true;
return -EOVERFLOW;
}
static inline void intel_pt_mtc_cyc_cnt_pge(struct intel_pt_decoder *decoder)
{
if (decoder->have_cyc)
return;
decoder->cyc_cnt_timestamp = decoder->timestamp;
decoder->base_cyc_cnt = decoder->tot_cyc_cnt;
}
static inline void intel_pt_mtc_cyc_cnt_cbr(struct intel_pt_decoder *decoder)
{
decoder->tsc_to_cyc = decoder->cbr / decoder->max_non_turbo_ratio_fp;
if (decoder->pge)
intel_pt_mtc_cyc_cnt_pge(decoder);
}
static inline void intel_pt_mtc_cyc_cnt_upd(struct intel_pt_decoder *decoder)
{
uint64_t tot_cyc_cnt, tsc_delta;
if (decoder->have_cyc)
return;
decoder->sample_cyc = true;
if (!decoder->pge || decoder->timestamp <= decoder->cyc_cnt_timestamp)
return;
tsc_delta = decoder->timestamp - decoder->cyc_cnt_timestamp;
tot_cyc_cnt = tsc_delta * decoder->tsc_to_cyc + decoder->base_cyc_cnt;
if (tot_cyc_cnt > decoder->tot_cyc_cnt)
decoder->tot_cyc_cnt = tot_cyc_cnt;
}
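/*
 * A TMA packet ties the CTC/MTC clock to the preceding TSC value: record the
 * CTC-derived base timestamp so later MTC packets can be converted to TSC.
 */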
static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
{
uint32_t ctc = decoder->packet.payload;
uint32_t fc = decoder->packet.count;
uint32_t ctc_rem = ctc & decoder->ctc_rem_mask;
if (!decoder->tsc_ctc_ratio_d)
return;
if (decoder->pge && !decoder->in_psb)
intel_pt_mtc_cyc_cnt_pge(decoder);
else
intel_pt_mtc_cyc_cnt_upd(decoder);
decoder->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
decoder->last_ctc = ctc - ctc_rem;
decoder->ctc_timestamp = decoder->tsc_timestamp - fc;
if (decoder->tsc_ctc_mult) {
decoder->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
} else {
decoder->ctc_timestamp -= multdiv(ctc_rem,
decoder->tsc_ctc_ratio_n,
decoder->tsc_ctc_ratio_d);
}
decoder->ctc_delta = 0;
decoder->have_tma = true;
decoder->fixup_last_mtc = true;
intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n",
decoder->ctc_timestamp, decoder->last_mtc, ctc_rem);
}
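/* Advance the timestamp by the CTC ticks implied by an MTC packet. */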
static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
{
uint64_t timestamp;
uint32_t mtc, mtc_delta;
if (!decoder->have_tma)
return;
mtc = decoder->packet.payload;
if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) {
decoder->fixup_last_mtc = false;
intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
&decoder->last_mtc);
}
if (mtc > decoder->last_mtc)
mtc_delta = mtc - decoder->last_mtc;
else
mtc_delta = mtc + 256 - decoder->last_mtc;
decoder->ctc_delta += mtc_delta << decoder->mtc_shift;
if (decoder->tsc_ctc_mult) {
timestamp = decoder->ctc_timestamp +
decoder->ctc_delta * decoder->tsc_ctc_mult;
} else {
timestamp = decoder->ctc_timestamp +
multdiv(decoder->ctc_delta,
decoder->tsc_ctc_ratio_n,
decoder->tsc_ctc_ratio_d);
}
if (timestamp < decoder->timestamp)
intel_pt_log("Suppressing MTC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n",
timestamp, decoder->timestamp);
else
decoder->timestamp = timestamp;
intel_pt_mtc_cyc_cnt_upd(decoder);
decoder->timestamp_insn_cnt = 0;
decoder->last_mtc = mtc;
if (decoder->last_packet_type == INTEL_PT_CYC) {
decoder->cyc_ref_timestamp = decoder->timestamp;
decoder->cycle_cnt = 0;
decoder->have_calc_cyc_to_tsc = false;
intel_pt_calc_cyc_to_tsc(decoder, true);
}
intel_pt_log_to("Setting timestamp", decoder->timestamp);
}
static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
{
unsigned int cbr = decoder->packet.payload & 0xff;
decoder->cbr_payload = decoder->packet.payload;
if (decoder->cbr == cbr)
return;
decoder->cbr = cbr;
decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
decoder->cyc_ref_timestamp = decoder->timestamp;
decoder->cycle_cnt = 0;
intel_pt_mtc_cyc_cnt_cbr(decoder);
}
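/*
 * Advance the timestamp using CYC packets, converting cycles to TSC ticks
 * with the calculated or CBR-based ratio.
 */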
static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
{
uint64_t timestamp = decoder->cyc_ref_timestamp;
decoder->have_cyc = true;
decoder->cycle_cnt += decoder->packet.payload;
if (decoder->pge)
decoder->tot_cyc_cnt += decoder->packet.payload;
decoder->sample_cyc = true;
if (!decoder->cyc_ref_timestamp)
return;
if (decoder->have_calc_cyc_to_tsc)
timestamp += decoder->cycle_cnt * decoder->calc_cyc_to_tsc;
else if (decoder->cbr)
timestamp += decoder->cycle_cnt * decoder->cbr_cyc_to_tsc;
else
return;
if (timestamp < decoder->timestamp)
intel_pt_log("Suppressing CYC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n",
timestamp, decoder->timestamp);
else
decoder->timestamp = timestamp;
decoder->timestamp_insn_cnt = 0;
intel_pt_log_to("Setting timestamp", decoder->timestamp);
}
static void intel_pt_bbp(struct intel_pt_decoder *decoder)
{
if (decoder->prev_pkt_ctx == INTEL_PT_NO_CTX) {
memset(decoder->state.items.mask, 0, sizeof(decoder->state.items.mask));
decoder->state.items.is_32_bit = false;
}
decoder->blk_type = decoder->packet.payload;
decoder->blk_type_pos = intel_pt_blk_type_pos(decoder->blk_type);
if (decoder->blk_type == INTEL_PT_GP_REGS)
decoder->state.items.is_32_bit = decoder->packet.count;
if (decoder->blk_type_pos < 0) {
intel_pt_log("WARNING: Unknown block type %u\n",
decoder->blk_type);
} else if (decoder->state.items.mask[decoder->blk_type_pos]) {
intel_pt_log("WARNING: Duplicate block type %u\n",
decoder->blk_type);
}
}
static void intel_pt_bip(struct intel_pt_decoder *decoder)
{
uint32_t id = decoder->packet.count;
uint32_t bit = 1 << id;
int pos = decoder->blk_type_pos;
if (pos < 0 || id >= INTEL_PT_BLK_ITEM_ID_CNT) {
intel_pt_log("WARNING: Unknown block item %u type %d\n",
id, decoder->blk_type);
return;
}
if (decoder->state.items.mask[pos] & bit) {
intel_pt_log("WARNING: Duplicate block item %u type %d\n",
id, decoder->blk_type);
}
decoder->state.items.mask[pos] |= bit;
decoder->state.items.val[pos][id] = decoder->packet.payload;
}
/* Walk PSB+ packets when already in sync. */
static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
{
int err;
decoder->in_psb = true;
while (1) {
err = intel_pt_get_next_packet(decoder);
if (err)
goto out;
switch (decoder->packet.type) {
case INTEL_PT_PSBEND:
err = 0;
goto out;
case INTEL_PT_TIP_PGD:
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
case INTEL_PT_TNT:
case INTEL_PT_TRACESTOP:
case INTEL_PT_BAD:
case INTEL_PT_PSB:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_BBP:
case INTEL_PT_BIP:
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
decoder->have_tma = false;
intel_pt_log("ERROR: Unexpected packet\n");
err = -EAGAIN;
goto out;
case INTEL_PT_OVF:
err = intel_pt_overflow(decoder);
goto out;
case INTEL_PT_TSC:
intel_pt_calc_tsc_timestamp(decoder);
break;
case INTEL_PT_TMA:
intel_pt_calc_tma(decoder);
break;
case INTEL_PT_CBR:
intel_pt_calc_cbr(decoder);
break;
case INTEL_PT_MODE_EXEC:
intel_pt_mode_exec_status(decoder);
break;
case INTEL_PT_PIP:
intel_pt_set_pip(decoder);
break;
case INTEL_PT_FUP:
decoder->pge = true;
if (decoder->packet.count) {
intel_pt_set_last_ip(decoder);
decoder->psb_ip = decoder->last_ip;
}
break;
case INTEL_PT_MODE_TSX:
intel_pt_update_in_tx(decoder);
break;
case INTEL_PT_MTC:
intel_pt_calc_mtc_timestamp(decoder);
if (decoder->period_type == INTEL_PT_PERIOD_MTC)
decoder->state.type |= INTEL_PT_INSTRUCTION;
break;
case INTEL_PT_CYC:
intel_pt_calc_cyc_timestamp(decoder);
break;
case INTEL_PT_VMCS:
case INTEL_PT_MNT:
case INTEL_PT_PAD:
default:
break;
}
}
out:
decoder->in_psb = false;
return err;
}
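/*
 * After a FUP, walk packets until the TIP, TIP.PGE or TIP.PGD that supplies
 * the branch destination or trace-enable change.
 */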
static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
{
int err;
if (decoder->tx_flags & INTEL_PT_ABORT_TX) {
decoder->tx_flags = 0;
decoder->state.flags &= ~INTEL_PT_IN_TX;
decoder->state.flags |= INTEL_PT_ABORT_TX;
} else {
decoder->state.flags |= INTEL_PT_ASYNC;
}
while (1) {
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
switch (decoder->packet.type) {
case INTEL_PT_TNT:
case INTEL_PT_FUP:
case INTEL_PT_TRACESTOP:
case INTEL_PT_PSB:
case INTEL_PT_TSC:
case INTEL_PT_TMA:
case INTEL_PT_MODE_TSX:
case INTEL_PT_BAD:
case INTEL_PT_PSBEND:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_BBP:
case INTEL_PT_BIP:
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
intel_pt_log("ERROR: Missing TIP after FUP\n");
decoder->pkt_state = INTEL_PT_STATE_ERR3;
decoder->pkt_step = 0;
return -ENOENT;
case INTEL_PT_CBR:
intel_pt_calc_cbr(decoder);
break;
case INTEL_PT_OVF:
return intel_pt_overflow(decoder);
case INTEL_PT_TIP_PGD:
decoder->state.from_ip = decoder->ip;
if (decoder->packet.count == 0) {
decoder->state.to_ip = 0;
} else {
intel_pt_set_ip(decoder);
decoder->state.to_ip = decoder->ip;
}
decoder->pge = false;
decoder->continuous_period = false;
decoder->state.type |= INTEL_PT_TRACE_END;
intel_pt_update_nr(decoder);
return 0;
case INTEL_PT_TIP_PGE:
decoder->pge = true;
intel_pt_log("Omitting PGE ip " x64_fmt "\n",
decoder->ip);
decoder->state.from_ip = 0;
if (decoder->packet.count == 0) {
decoder->state.to_ip = 0;
} else {
intel_pt_set_ip(decoder);
decoder->state.to_ip = decoder->ip;
}
decoder->state.type |= INTEL_PT_TRACE_BEGIN;
intel_pt_mtc_cyc_cnt_pge(decoder);
intel_pt_set_nr(decoder);
return 0;
case INTEL_PT_TIP:
decoder->state.from_ip = decoder->ip;
if (decoder->packet.count == 0) {
decoder->state.to_ip = 0;
} else {
intel_pt_set_ip(decoder);
decoder->state.to_ip = decoder->ip;
}
intel_pt_update_nr(decoder);
intel_pt_sample_iflag_chg(decoder);
return 0;
case INTEL_PT_PIP:
intel_pt_update_pip(decoder);
break;
case INTEL_PT_MTC:
intel_pt_calc_mtc_timestamp(decoder);
if (decoder->period_type == INTEL_PT_PERIOD_MTC)
decoder->state.type |= INTEL_PT_INSTRUCTION;
break;
case INTEL_PT_CYC:
intel_pt_calc_cyc_timestamp(decoder);
break;
case INTEL_PT_MODE_EXEC:
intel_pt_mode_exec(decoder);
break;
case INTEL_PT_VMCS:
case INTEL_PT_MNT:
case INTEL_PT_PAD:
break;
default:
return intel_pt_bug(decoder);
}
}
}
static int intel_pt_resample(struct intel_pt_decoder *decoder)
{
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->state.type = INTEL_PT_INSTRUCTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
return 0;
}
struct intel_pt_vm_tsc_info {
struct intel_pt_pkt pip_packet;
struct intel_pt_pkt vmcs_packet;
struct intel_pt_pkt tma_packet;
bool tsc, pip, vmcs, tma, psbend;
uint64_t ctc_delta;
uint64_t last_ctc;
int max_lookahead;
};
/* Lookahead and get the PIP, VMCS and TMA packets from PSB+ */
static int intel_pt_vm_psb_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
{
struct intel_pt_vm_tsc_info *data = pkt_info->data;
switch (pkt_info->packet.type) {
case INTEL_PT_PAD:
case INTEL_PT_MNT:
case INTEL_PT_MODE_EXEC:
case INTEL_PT_MODE_TSX:
case INTEL_PT_MTC:
case INTEL_PT_FUP:
case INTEL_PT_CYC:
case INTEL_PT_CBR:
break;
case INTEL_PT_TSC:
data->tsc = true;
break;
case INTEL_PT_TMA:
data->tma_packet = pkt_info->packet;
data->tma = true;
break;
case INTEL_PT_PIP:
data->pip_packet = pkt_info->packet;
data->pip = true;
break;
case INTEL_PT_VMCS:
data->vmcs_packet = pkt_info->packet;
data->vmcs = true;
break;
case INTEL_PT_PSBEND:
data->psbend = true;
return 1;
case INTEL_PT_TIP_PGE:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_BBP:
case INTEL_PT_BIP:
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
case INTEL_PT_OVF:
case INTEL_PT_BAD:
case INTEL_PT_TNT:
case INTEL_PT_TIP_PGD:
case INTEL_PT_TIP:
case INTEL_PT_PSB:
case INTEL_PT_TRACESTOP:
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
default:
return 1;
}
return 0;
}
struct intel_pt_ovf_fup_info {
int max_lookahead;
bool found;
};
/* Lookahead to detect a FUP packet after OVF */
static int intel_pt_ovf_fup_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
{
struct intel_pt_ovf_fup_info *data = pkt_info->data;
if (pkt_info->packet.type == INTEL_PT_CYC ||
pkt_info->packet.type == INTEL_PT_MTC ||
pkt_info->packet.type == INTEL_PT_TSC)
return !--(data->max_lookahead);
data->found = pkt_info->packet.type == INTEL_PT_FUP;
return 1;
}
static bool intel_pt_ovf_fup_lookahead(struct intel_pt_decoder *decoder)
{
struct intel_pt_ovf_fup_info data = {
.max_lookahead = 16,
.found = false,
};
intel_pt_pkt_lookahead(decoder, intel_pt_ovf_fup_lookahead_cb, &data);
return data.found;
}
/* Lookahead and get the TMA packet after TSC */
static int intel_pt_tma_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
{
struct intel_pt_vm_tsc_info *data = pkt_info->data;
if (pkt_info->packet.type == INTEL_PT_CYC ||
pkt_info->packet.type == INTEL_PT_MTC)
return !--(data->max_lookahead);
if (pkt_info->packet.type == INTEL_PT_TMA) {
data->tma_packet = pkt_info->packet;
data->tma = true;
}
return 1;
}
static uint64_t intel_pt_ctc_to_tsc(struct intel_pt_decoder *decoder, uint64_t ctc)
{
if (decoder->tsc_ctc_mult)
return ctc * decoder->tsc_ctc_mult;
else
return multdiv(ctc, decoder->tsc_ctc_ratio_n, decoder->tsc_ctc_ratio_d);
}
static uint64_t intel_pt_calc_expected_tsc(struct intel_pt_decoder *decoder,
uint32_t ctc,
uint32_t fc,
uint64_t last_ctc_timestamp,
uint64_t ctc_delta,
uint32_t last_ctc)
{
/* Number of CTC ticks from last_ctc_timestamp to last_mtc */
uint64_t last_mtc_ctc = last_ctc + ctc_delta;
/*
* Number of CTC ticks from there until current TMA packet. We would
* expect last_mtc_ctc to be before ctc, but the TSC packet can slip
* past an MTC, so a sign-extended value is used.
*/
uint64_t delta = (int16_t)((uint16_t)ctc - (uint16_t)last_mtc_ctc);
/* Total CTC ticks from last_ctc_timestamp to current TMA packet */
uint64_t new_ctc_delta = ctc_delta + delta;
uint64_t expected_tsc;
/*
* Convert CTC ticks to TSC ticks, add the starting point
* (last_ctc_timestamp) and the fast counter from the TMA packet.
*/
expected_tsc = last_ctc_timestamp + intel_pt_ctc_to_tsc(decoder, new_ctc_delta) + fc;
if (intel_pt_enable_logging) {
intel_pt_log_x64(last_mtc_ctc);
intel_pt_log_x32(last_ctc);
intel_pt_log_x64(ctc_delta);
intel_pt_log_x64(delta);
intel_pt_log_x32(ctc);
intel_pt_log_x64(new_ctc_delta);
intel_pt_log_x64(last_ctc_timestamp);
intel_pt_log_x32(fc);
intel_pt_log_x64(intel_pt_ctc_to_tsc(decoder, new_ctc_delta));
intel_pt_log_x64(expected_tsc);
}
return expected_tsc;
}
static uint64_t intel_pt_expected_tsc(struct intel_pt_decoder *decoder,
struct intel_pt_vm_tsc_info *data)
{
uint32_t ctc = data->tma_packet.payload;
uint32_t fc = data->tma_packet.count;
return intel_pt_calc_expected_tsc(decoder, ctc, fc,
decoder->ctc_timestamp,
data->ctc_delta, data->last_ctc);
}
static void intel_pt_translate_vm_tsc(struct intel_pt_decoder *decoder,
struct intel_pt_vmcs_info *vmcs_info)
{
uint64_t payload = decoder->packet.payload;
/* VMX adds the TSC Offset, so subtract to get host TSC */
decoder->packet.payload -= vmcs_info->tsc_offset;
/* TSC packet has only 7 bytes */
decoder->packet.payload &= SEVEN_BYTES;
/*
* The buffer is mmapped from the data file, so this also updates the
* data file.
*/
if (!decoder->vm_tm_corr_dry_run)
memcpy((void *)decoder->buf + 1, &decoder->packet.payload, 7);
intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
" VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
payload, decoder->packet.payload, vmcs_info->vmcs,
vmcs_info->tsc_offset);
}
static void intel_pt_translate_vm_tsc_offset(struct intel_pt_decoder *decoder,
uint64_t tsc_offset)
{
struct intel_pt_vmcs_info vmcs_info = {
.vmcs = NO_VMCS,
.tsc_offset = tsc_offset
};
intel_pt_translate_vm_tsc(decoder, &vmcs_info);
}
static inline bool in_vm(uint64_t pip_payload)
{
return pip_payload & 1;
}
static inline bool pip_in_vm(struct intel_pt_pkt *pip_packet)
{
return pip_packet->payload & 1;
}
static void intel_pt_print_vmcs_info(struct intel_pt_vmcs_info *vmcs_info)
{
p_log("VMCS: %#" PRIx64 " TSC Offset %#" PRIx64,
vmcs_info->vmcs, vmcs_info->tsc_offset);
}
static void intel_pt_vm_tm_corr_psb(struct intel_pt_decoder *decoder,
struct intel_pt_vm_tsc_info *data)
{
memset(data, 0, sizeof(*data));
data->ctc_delta = decoder->ctc_delta;
data->last_ctc = decoder->last_ctc;
intel_pt_pkt_lookahead(decoder, intel_pt_vm_psb_lookahead_cb, data);
if (data->tsc && !data->psbend)
p_log("ERROR: PSB without PSBEND");
decoder->in_psb = data->psbend;
}
static void intel_pt_vm_tm_corr_first_tsc(struct intel_pt_decoder *decoder,
struct intel_pt_vm_tsc_info *data,
struct intel_pt_vmcs_info *vmcs_info,
uint64_t host_tsc)
{
if (!decoder->in_psb) {
/* Can't happen */
p_log("ERROR: First TSC is not in PSB+");
}
if (data->pip) {
if (pip_in_vm(&data->pip_packet)) { /* Guest */
if (vmcs_info && vmcs_info->tsc_offset) {
intel_pt_translate_vm_tsc(decoder, vmcs_info);
decoder->vm_tm_corr_reliable = true;
} else {
p_log("ERROR: First TSC, unknown TSC Offset");
}
} else { /* Host */
decoder->vm_tm_corr_reliable = true;
}
} else { /* Host or Guest */
decoder->vm_tm_corr_reliable = false;
if (intel_pt_time_in_range(decoder, host_tsc)) {
/* Assume Host */
} else {
/* Assume Guest */
if (vmcs_info && vmcs_info->tsc_offset)
intel_pt_translate_vm_tsc(decoder, vmcs_info);
else
p_log("ERROR: First TSC, no PIP, unknown TSC Offset");
}
}
}
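/*
 * Decide whether a TSC packet came from the host or a guest and, for guest
 * TSC values, rewrite the payload using the VMCS TSC Offset.
 */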
static void intel_pt_vm_tm_corr_tsc(struct intel_pt_decoder *decoder,
struct intel_pt_vm_tsc_info *data)
{
struct intel_pt_vmcs_info *vmcs_info;
uint64_t tsc_offset = 0;
uint64_t vmcs;
bool reliable = true;
uint64_t expected_tsc;
uint64_t host_tsc;
uint64_t ref_timestamp;
bool assign = false;
bool assign_reliable = false;
/* Already have 'data' for the in_psb case */
if (!decoder->in_psb) {
memset(data, 0, sizeof(*data));
data->ctc_delta = decoder->ctc_delta;
data->last_ctc = decoder->last_ctc;
data->max_lookahead = 16;
intel_pt_pkt_lookahead(decoder, intel_pt_tma_lookahead_cb, data);
if (decoder->pge) {
data->pip = true;
data->pip_packet.payload = decoder->pip_payload;
}
}
/* Calculations depend on having TMA packets */
if (!data->tma) {
p_log("ERROR: TSC without TMA");
return;
}
vmcs = data->vmcs ? data->vmcs_packet.payload : decoder->vmcs;
if (vmcs == NO_VMCS)
vmcs = 0;
vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);
ref_timestamp = decoder->timestamp ? decoder->timestamp : decoder->buf_timestamp;
host_tsc = intel_pt_8b_tsc(decoder->packet.payload, ref_timestamp);
if (!decoder->ctc_timestamp) {
intel_pt_vm_tm_corr_first_tsc(decoder, data, vmcs_info, host_tsc);
return;
}
expected_tsc = intel_pt_expected_tsc(decoder, data);
tsc_offset = host_tsc - expected_tsc;
/* Determine if TSC is from Host or Guest */
if (data->pip) {
if (pip_in_vm(&data->pip_packet)) { /* Guest */
if (!vmcs_info) {
/* PIP NR=1 without VMCS cannot happen */
p_log("ERROR: Missing VMCS");
intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
decoder->vm_tm_corr_reliable = false;
return;
}
} else { /* Host */
decoder->last_reliable_timestamp = host_tsc;
decoder->vm_tm_corr_reliable = true;
return;
}
} else { /* Host or Guest */
reliable = false; /* Host/Guest is a guess, so not reliable */
if (decoder->in_psb) {
if (!tsc_offset)
return; /* Zero TSC Offset, assume Host */
/*
* TSC packet has only 7 bytes of TSC. We have no
* information about the Guest's 8th byte, but it
* doesn't matter because we only need 7 bytes.
* Here, since the 8th byte is unreliable and
* irrelevant, compare only 7 bytes.
*/
if (vmcs_info &&
(tsc_offset & SEVEN_BYTES) ==
(vmcs_info->tsc_offset & SEVEN_BYTES)) {
/* Same TSC Offset as last VMCS, assume Guest */
goto guest;
}
}
/*
* Check if the host_tsc is within the expected range.
* Note, we could narrow the range more by looking ahead for
* the next host TSC in the same buffer, but we don't bother to
* do that because this is probably good enough.
*/
if (host_tsc >= expected_tsc && intel_pt_time_in_range(decoder, host_tsc)) {
/* Within expected range for Host TSC, assume Host */
decoder->vm_tm_corr_reliable = false;
return;
}
}
guest: /* Assuming Guest */
/* Determine whether to assign TSC Offset */
if (vmcs_info && vmcs_info->vmcs) {
if (vmcs_info->tsc_offset && vmcs_info->reliable) {
assign = false;
} else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_reliable &&
decoder->vm_tm_corr_continuous && decoder->vm_tm_corr_same_buf) {
/* Continuous tracing, TSC in a PSB is not a time loss */
assign = true;
assign_reliable = true;
} else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_same_buf) {
/*
* Unlikely to be a time loss TSC in a PSB which is not
* at the start of a buffer.
*/
assign = true;
assign_reliable = false;
}
}
/* Record VMCS TSC Offset */
if (assign && (vmcs_info->tsc_offset != tsc_offset ||
vmcs_info->reliable != assign_reliable)) {
bool print = vmcs_info->tsc_offset != tsc_offset;
vmcs_info->tsc_offset = tsc_offset;
vmcs_info->reliable = assign_reliable;
if (print)
intel_pt_print_vmcs_info(vmcs_info);
}
/* Determine what TSC Offset to use */
if (vmcs_info && vmcs_info->tsc_offset) {
if (!vmcs_info->reliable)
reliable = false;
intel_pt_translate_vm_tsc(decoder, vmcs_info);
} else {
reliable = false;
if (vmcs_info) {
if (!vmcs_info->error_printed) {
p_log("ERROR: Unknown TSC Offset for VMCS %#" PRIx64,
vmcs_info->vmcs);
vmcs_info->error_printed = true;
}
} else {
if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
p_log("ERROR: Unknown VMCS");
}
intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
}
decoder->vm_tm_corr_reliable = reliable;
}
static void intel_pt_vm_tm_corr_pebs_tsc(struct intel_pt_decoder *decoder)
{
uint64_t host_tsc = decoder->packet.payload;
uint64_t guest_tsc = decoder->packet.payload;
struct intel_pt_vmcs_info *vmcs_info;
uint64_t vmcs;
vmcs = decoder->vmcs;
if (vmcs == NO_VMCS)
vmcs = 0;
vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);
if (decoder->pge) {
if (in_vm(decoder->pip_payload)) { /* Guest */
if (!vmcs_info) {
/* PIP NR=1 without VMCS cannot happen */
p_log("ERROR: Missing VMCS");
}
} else { /* Host */
return;
}
} else { /* Host or Guest */
if (intel_pt_time_in_range(decoder, host_tsc)) {
/* Within expected range for Host TSC, assume Host */
return;
}
}
if (vmcs_info) {
/* Translate Guest TSC to Host TSC */
host_tsc = ((guest_tsc & SEVEN_BYTES) - vmcs_info->tsc_offset) & SEVEN_BYTES;
host_tsc = intel_pt_8b_tsc(host_tsc, decoder->timestamp);
intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
" VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
guest_tsc, host_tsc, vmcs_info->vmcs,
vmcs_info->tsc_offset);
if (!intel_pt_time_in_range(decoder, host_tsc) &&
intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
p_log("Timestamp out of range");
} else {
if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
p_log("ERROR: Unknown VMCS");
host_tsc = decoder->timestamp;
}
decoder->packet.payload = host_tsc;
if (!decoder->vm_tm_corr_dry_run)
memcpy((void *)decoder->buf + 1, &host_tsc, 8);
}
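/*
 * Main loop of the VM time correlation pass: walk the trace rewriting guest
 * TSC values into host time.
 */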
static int intel_pt_vm_time_correlation(struct intel_pt_decoder *decoder)
{
struct intel_pt_vm_tsc_info data = { .psbend = false };
bool pge;
int err;
if (decoder->in_psb)
intel_pt_vm_tm_corr_psb(decoder, &data);
while (1) {
err = intel_pt_get_next_packet(decoder);
if (err == -ENOLINK)
continue;
if (err)
break;
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->pge = false;
decoder->vm_tm_corr_continuous = false;
break;
case INTEL_PT_TNT:
case INTEL_PT_TIP:
case INTEL_PT_TIP_PGE:
decoder->pge = true;
break;
case INTEL_PT_OVF:
decoder->in_psb = false;
pge = decoder->pge;
decoder->pge = intel_pt_ovf_fup_lookahead(decoder);
if (pge != decoder->pge)
intel_pt_log("Surprising PGE change in OVF!");
if (!decoder->pge)
decoder->vm_tm_corr_continuous = false;
break;
case INTEL_PT_FUP:
if (decoder->in_psb)
decoder->pge = true;
break;
case INTEL_PT_TRACESTOP:
decoder->pge = false;
decoder->vm_tm_corr_continuous = false;
decoder->have_tma = false;
break;
case INTEL_PT_PSB:
intel_pt_vm_tm_corr_psb(decoder, &data);
break;
case INTEL_PT_PIP:
decoder->pip_payload = decoder->packet.payload;
break;
case INTEL_PT_MTC:
intel_pt_calc_mtc_timestamp(decoder);
break;
case INTEL_PT_TSC:
intel_pt_vm_tm_corr_tsc(decoder, &data);
intel_pt_calc_tsc_timestamp(decoder);
decoder->vm_tm_corr_same_buf = true;
decoder->vm_tm_corr_continuous = decoder->pge;
break;
case INTEL_PT_TMA:
intel_pt_calc_tma(decoder);
break;
case INTEL_PT_CYC:
intel_pt_calc_cyc_timestamp(decoder);
break;
case INTEL_PT_CBR:
intel_pt_calc_cbr(decoder);
break;
case INTEL_PT_PSBEND:
decoder->in_psb = false;
data.psbend = false;
break;
case INTEL_PT_VMCS:
if (decoder->packet.payload != NO_VMCS)
decoder->vmcs = decoder->packet.payload;
break;
case INTEL_PT_BBP:
decoder->blk_type = decoder->packet.payload;
break;
case INTEL_PT_BIP:
if (decoder->blk_type == INTEL_PT_PEBS_BASIC &&
decoder->packet.count == 2)
intel_pt_vm_tm_corr_pebs_tsc(decoder);
break;
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
decoder->blk_type = 0;
break;
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
case INTEL_PT_MODE_EXEC:
case INTEL_PT_MODE_TSX:
case INTEL_PT_MNT:
case INTEL_PT_PAD:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_PTWRITE:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_PWRX:
case INTEL_PT_BAD: /* Does not happen */
default:
break;
}
}
return err;
}
#define HOP_PROCESS 0
#define HOP_IGNORE 1
#define HOP_RETURN 2
#define HOP_AGAIN 3
static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
/* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
{
*err = 0;
/* Leap from PSB to PSB, getting ip from FUP within PSB+ */
if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
*err = intel_pt_scan_for_psb(decoder);
if (*err)
return HOP_RETURN;
}
switch (decoder->packet.type) {
case INTEL_PT_TNT:
return HOP_IGNORE;
case INTEL_PT_TIP_PGD:
decoder->pge = false;
if (!decoder->packet.count) {
intel_pt_set_nr(decoder);
return HOP_IGNORE;
}
intel_pt_set_ip(decoder);
decoder->state.type |= INTEL_PT_TRACE_END;
decoder->state.from_ip = 0;
decoder->state.to_ip = decoder->ip;
intel_pt_update_nr(decoder);
return HOP_RETURN;
case INTEL_PT_TIP:
if (!decoder->packet.count) {
intel_pt_set_nr(decoder);
return HOP_IGNORE;
}
intel_pt_set_ip(decoder);
decoder->state.type = INTEL_PT_INSTRUCTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
intel_pt_update_nr(decoder);
intel_pt_sample_iflag_chg(decoder);
return HOP_RETURN;
case INTEL_PT_FUP:
if (!decoder->packet.count)
return HOP_IGNORE;
intel_pt_set_ip(decoder);
if (decoder->set_fup_mwait || decoder->set_fup_pwre)
*no_tip = true;
if (!decoder->branch_enable || !decoder->pge)
*no_tip = true;
if (*no_tip) {
decoder->state.type = INTEL_PT_INSTRUCTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
intel_pt_fup_event(decoder, *no_tip);
return HOP_RETURN;
}
intel_pt_fup_event(decoder, *no_tip);
decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH;
*err = intel_pt_walk_fup_tip(decoder);
if (!*err && decoder->state.to_ip)
decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
return HOP_RETURN;
case INTEL_PT_PSB:
decoder->state.psb_offset = decoder->pos;
decoder->psb_ip = 0;
decoder->last_ip = 0;
decoder->have_last_ip = true;
*err = intel_pt_walk_psbend(decoder);
if (*err == -EAGAIN)
return HOP_AGAIN;
if (*err)
return HOP_RETURN;
decoder->state.type = INTEL_PT_PSB_EVT;
if (decoder->psb_ip) {
decoder->state.type |= INTEL_PT_INSTRUCTION;
decoder->ip = decoder->psb_ip;
}
decoder->state.from_ip = decoder->psb_ip;
decoder->state.to_ip = 0;
return HOP_RETURN;
case INTEL_PT_BAD:
case INTEL_PT_PAD:
case INTEL_PT_TIP_PGE:
case INTEL_PT_TSC:
case INTEL_PT_TMA:
case INTEL_PT_MODE_EXEC:
case INTEL_PT_MODE_TSX:
case INTEL_PT_MTC:
case INTEL_PT_CYC:
case INTEL_PT_VMCS:
case INTEL_PT_PSBEND:
case INTEL_PT_CBR:
case INTEL_PT_TRACESTOP:
case INTEL_PT_PIP:
case INTEL_PT_OVF:
case INTEL_PT_MNT:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_BBP:
case INTEL_PT_BIP:
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
default:
return HOP_PROCESS;
}
}
struct intel_pt_psb_info {
struct intel_pt_pkt fup_packet;
bool fup;
int after_psbend;
};
/* Lookahead and get the FUP packet from PSB+ */
static int intel_pt_psb_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
{
struct intel_pt_psb_info *data = pkt_info->data;
switch (pkt_info->packet.type) {
case INTEL_PT_PAD:
case INTEL_PT_MNT:
case INTEL_PT_TSC:
case INTEL_PT_TMA:
case INTEL_PT_MODE_EXEC:
case INTEL_PT_MODE_TSX:
case INTEL_PT_MTC:
case INTEL_PT_CYC:
case INTEL_PT_VMCS:
case INTEL_PT_CBR:
case INTEL_PT_PIP:
if (data->after_psbend) {
data->after_psbend -= 1;
if (!data->after_psbend)
return 1;
}
break;
case INTEL_PT_FUP:
if (data->after_psbend)
return 1;
if (data->fup || pkt_info->packet.count == 0)
return 1;
data->fup_packet = pkt_info->packet;
data->fup = true;
break;
case INTEL_PT_PSBEND:
if (!data->fup)
return 1;
/* Keep going to check for a TIP.PGE */
data->after_psbend = 6;
break;
case INTEL_PT_TIP_PGE:
/* Ignore FUP in PSB+ if followed by TIP.PGE */
if (data->after_psbend)
data->fup = false;
return 1;
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_BBP:
case INTEL_PT_BIP:
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
if (data->after_psbend) {
data->after_psbend -= 1;
if (!data->after_psbend)
return 1;
break;
}
return 1;
case INTEL_PT_OVF:
case INTEL_PT_BAD:
case INTEL_PT_TNT:
case INTEL_PT_TIP_PGD:
case INTEL_PT_TIP:
case INTEL_PT_PSB:
case INTEL_PT_TRACESTOP:
default:
return 1;
}
return 0;
}
static int intel_pt_psb(struct intel_pt_decoder *decoder)
{
int err;
decoder->last_ip = 0;
decoder->psb_ip = 0;
decoder->have_last_ip = true;
intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psbend(decoder);
if (err)
return err;
decoder->state.type = INTEL_PT_PSB_EVT;
decoder->state.from_ip = decoder->psb_ip;
decoder->state.to_ip = 0;
return 0;
}
static int intel_pt_fup_in_psb(struct intel_pt_decoder *decoder)
{
int err;
if (decoder->ip != decoder->last_ip) {
err = intel_pt_walk_fup(decoder);
if (!err || err != -EAGAIN)
return err;
}
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
err = intel_pt_psb(decoder);
if (err) {
decoder->pkt_state = INTEL_PT_STATE_ERR3;
return -ENOENT;
}
return 0;
}
static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
{
struct intel_pt_psb_info data = { .fup = false };
if (!decoder->branch_enable)
return false;
intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
if (!data.fup)
return false;
decoder->packet = data.fup_packet;
intel_pt_set_last_ip(decoder);
decoder->pkt_state = INTEL_PT_STATE_FUP_IN_PSB;
*err = intel_pt_fup_in_psb(decoder);
return true;
}
static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
{
int last_packet_type = INTEL_PT_PAD;
bool no_tip = false;
int err;
while (1) {
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
next:
err = 0;
if (decoder->cyc_threshold) {
if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
decoder->sample_cyc = false;
last_packet_type = decoder->packet.type;
}
if (decoder->hop) {
switch (intel_pt_hop_trace(decoder, &no_tip, &err)) {
case HOP_IGNORE:
continue;
case HOP_RETURN:
return err;
case HOP_AGAIN:
goto next;
default:
break;
}
}
switch (decoder->packet.type) {
case INTEL_PT_TNT:
if (!decoder->packet.count)
break;
decoder->tnt = decoder->packet;
decoder->pkt_state = INTEL_PT_STATE_TNT;
err = intel_pt_walk_tnt(decoder);
if (err == -EAGAIN)
break;
return err;
case INTEL_PT_TIP_PGD:
if (decoder->packet.count != 0)
intel_pt_set_last_ip(decoder);
decoder->pkt_state = INTEL_PT_STATE_TIP_PGD;
return intel_pt_walk_tip(decoder);
case INTEL_PT_TIP_PGE: {
decoder->pge = true;
decoder->overflow = false;
intel_pt_mtc_cyc_cnt_pge(decoder);
intel_pt_set_nr(decoder);
if (decoder->packet.count == 0) {
intel_pt_log_at("Skipping zero TIP.PGE",
decoder->pos);
break;
}
intel_pt_sample_iflag_chg(decoder);
intel_pt_set_ip(decoder);
decoder->state.from_ip = 0;
decoder->state.to_ip = decoder->ip;
decoder->state.type |= INTEL_PT_TRACE_BEGIN;
/*
* In hop mode, resample to get the to_ip as an
* "instruction" sample.
*/
if (decoder->hop)
decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
return 0;
}
case INTEL_PT_OVF:
return intel_pt_overflow(decoder);
case INTEL_PT_TIP:
if (decoder->packet.count != 0)
intel_pt_set_last_ip(decoder);
decoder->pkt_state = INTEL_PT_STATE_TIP;
return intel_pt_walk_tip(decoder);
case INTEL_PT_FUP:
if (decoder->packet.count == 0) {
intel_pt_log_at("Skipping zero FUP",
decoder->pos);
no_tip = false;
break;
}
intel_pt_set_last_ip(decoder);
if (!decoder->branch_enable || !decoder->pge) {
decoder->ip = decoder->last_ip;
if (intel_pt_fup_event(decoder, no_tip))
return 0;
no_tip = false;
break;
}
if (decoder->set_fup_mwait)
no_tip = true;
if (no_tip)
decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP;
else
decoder->pkt_state = INTEL_PT_STATE_FUP;
err = intel_pt_walk_fup(decoder);
if (err != -EAGAIN)
return err;
if (no_tip) {
no_tip = false;
break;
}
return intel_pt_walk_fup_tip(decoder);
case INTEL_PT_TRACESTOP:
decoder->pge = false;
decoder->continuous_period = false;
intel_pt_clear_tx_flags(decoder);
decoder->have_tma = false;
break;
case INTEL_PT_PSB:
decoder->state.psb_offset = decoder->pos;
decoder->psb_ip = 0;
if (intel_pt_psb_with_fup(decoder, &err))
return err;
err = intel_pt_psb(decoder);
if (err == -EAGAIN)
goto next;
return err;
case INTEL_PT_PIP:
intel_pt_update_pip(decoder);
break;
case INTEL_PT_MTC:
intel_pt_calc_mtc_timestamp(decoder);
if (decoder->period_type != INTEL_PT_PERIOD_MTC)
break;
/*
* Ensure that there has been an instruction since the
* last MTC.
*/
if (!decoder->mtc_insn)
break;
decoder->mtc_insn = false;
/* Ensure that there is a timestamp */
if (!decoder->timestamp)
break;
decoder->state.type = INTEL_PT_INSTRUCTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->mtc_insn = false;
return 0;
case INTEL_PT_TSC:
intel_pt_calc_tsc_timestamp(decoder);
break;
case INTEL_PT_TMA:
intel_pt_calc_tma(decoder);
break;
case INTEL_PT_CYC:
intel_pt_calc_cyc_timestamp(decoder);
break;
case INTEL_PT_CBR:
intel_pt_calc_cbr(decoder);
if (decoder->cbr != decoder->cbr_seen) {
decoder->state.type = 0;
return 0;
}
break;
case INTEL_PT_MODE_EXEC:
intel_pt_mode_exec(decoder);
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
if (decoder->packet.type == INTEL_PT_FUP) {
decoder->set_fup_mode_exec = true;
no_tip = true;
}
goto next;
case INTEL_PT_MODE_TSX:
/* MODE_TSX need not be followed by FUP */
if (!decoder->pge || decoder->in_psb) {
intel_pt_update_in_tx(decoder);
break;
}
err = intel_pt_mode_tsx(decoder, &no_tip);
if (err)
return err;
goto next;
case INTEL_PT_BAD: /* Does not happen */
return intel_pt_bug(decoder);
case INTEL_PT_PSBEND:
case INTEL_PT_VMCS:
case INTEL_PT_MNT:
case INTEL_PT_PAD:
break;
case INTEL_PT_PTWRITE_IP:
decoder->fup_ptw_payload = decoder->packet.payload;
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
if (decoder->packet.type == INTEL_PT_FUP) {
decoder->set_fup_ptw = true;
no_tip = true;
} else {
intel_pt_log_at("ERROR: Missing FUP after PTWRITE",
decoder->pos);
}
goto next;
case INTEL_PT_PTWRITE:
decoder->state.type = INTEL_PT_PTW;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.ptw_payload = decoder->packet.payload;
return 0;
case INTEL_PT_MWAIT:
decoder->fup_mwait_payload = decoder->packet.payload;
decoder->set_fup_mwait = true;
break;
case INTEL_PT_PWRE:
if (decoder->set_fup_mwait) {
decoder->fup_pwre_payload =
decoder->packet.payload;
decoder->set_fup_pwre = true;
break;
}
decoder->state.type = INTEL_PT_PWR_ENTRY;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.pwrx_payload = decoder->packet.payload;
return 0;
case INTEL_PT_EXSTOP_IP:
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
if (decoder->packet.type == INTEL_PT_FUP) {
decoder->set_fup_exstop = true;
no_tip = true;
} else {
intel_pt_log_at("ERROR: Missing FUP after EXSTOP",
decoder->pos);
}
goto next;
case INTEL_PT_EXSTOP:
decoder->state.type = INTEL_PT_EX_STOP;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
return 0;
case INTEL_PT_PWRX:
decoder->state.type = INTEL_PT_PWR_EXIT;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.pwrx_payload = decoder->packet.payload;
return 0;
case INTEL_PT_BBP:
intel_pt_bbp(decoder);
break;
case INTEL_PT_BIP:
intel_pt_bip(decoder);
break;
case INTEL_PT_BEP:
decoder->state.type = INTEL_PT_BLK_ITEMS;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
return 0;
case INTEL_PT_BEP_IP:
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
if (decoder->packet.type == INTEL_PT_FUP) {
decoder->set_fup_bep = true;
no_tip = true;
} else {
intel_pt_log_at("ERROR: Missing FUP after BEP",
decoder->pos);
}
goto next;
case INTEL_PT_CFE:
decoder->fup_cfe_pkt = decoder->packet;
decoder->set_fup_cfe = true;
if (!decoder->pge) {
intel_pt_fup_event(decoder, true);
return 0;
}
break;
case INTEL_PT_CFE_IP:
decoder->fup_cfe_pkt = decoder->packet;
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
if (decoder->packet.type == INTEL_PT_FUP) {
decoder->set_fup_cfe_ip = true;
no_tip = true;
} else {
intel_pt_log_at("ERROR: Missing FUP after CFE",
decoder->pos);
}
goto next;
case INTEL_PT_EVD:
err = intel_pt_evd(decoder);
if (err)
return err;
break;
default:
return intel_pt_bug(decoder);
}
}
}
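/*
 * A packet provides a usable IP if it has an IP payload at all (count != 0)
 * and either the previous IP is known, or the IP compression type carries
 * the complete address by itself (count 3 is 48-bit sign-extended, count 6
 * is the full 64-bit IP).
 */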
static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
{
return decoder->packet.count &&
(decoder->have_last_ip || decoder->packet.count == 3 ||
decoder->packet.count == 6);
}
/* Walk PSB+ packets to get in sync. */
static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
{
int err;
decoder->in_psb = true;
while (1) {
err = intel_pt_get_next_packet(decoder);
if (err)
goto out;
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
fallthrough;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_BBP:
case INTEL_PT_BIP:
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
intel_pt_log("ERROR: Unexpected packet\n");
err = -ENOENT;
goto out;
case INTEL_PT_FUP:
decoder->pge = true;
if (intel_pt_have_ip(decoder)) {
uint64_t current_ip = decoder->ip;
intel_pt_set_ip(decoder);
decoder->psb_ip = decoder->ip;
if (current_ip)
intel_pt_log_to("Setting IP",
decoder->ip);
}
break;
case INTEL_PT_MTC:
intel_pt_calc_mtc_timestamp(decoder);
break;
case INTEL_PT_TSC:
intel_pt_calc_tsc_timestamp(decoder);
break;
case INTEL_PT_TMA:
intel_pt_calc_tma(decoder);
break;
case INTEL_PT_CYC:
intel_pt_calc_cyc_timestamp(decoder);
break;
case INTEL_PT_CBR:
intel_pt_calc_cbr(decoder);
break;
case INTEL_PT_PIP:
intel_pt_set_pip(decoder);
break;
case INTEL_PT_MODE_EXEC:
intel_pt_mode_exec_status(decoder);
break;
case INTEL_PT_MODE_TSX:
intel_pt_update_in_tx(decoder);
break;
case INTEL_PT_TRACESTOP:
decoder->pge = false;
decoder->continuous_period = false;
intel_pt_clear_tx_flags(decoder);
fallthrough;
case INTEL_PT_TNT:
decoder->have_tma = false;
intel_pt_log("ERROR: Unexpected packet\n");
if (decoder->ip)
decoder->pkt_state = INTEL_PT_STATE_ERR4;
else
decoder->pkt_state = INTEL_PT_STATE_ERR3;
err = -ENOENT;
goto out;
case INTEL_PT_BAD: /* Does not happen */
err = intel_pt_bug(decoder);
goto out;
case INTEL_PT_OVF:
err = intel_pt_overflow(decoder);
goto out;
case INTEL_PT_PSBEND:
err = 0;
goto out;
case INTEL_PT_PSB:
case INTEL_PT_VMCS:
case INTEL_PT_MNT:
case INTEL_PT_PAD:
default:
break;
}
}
out:
decoder->in_psb = false;
return err;
}
static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
{
int err;
while (1) {
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
decoder->pge = false;
if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (!decoder->ip)
break;
decoder->state.type |= INTEL_PT_TRACE_END;
return 0;
case INTEL_PT_TIP_PGE:
decoder->pge = true;
intel_pt_mtc_cyc_cnt_pge(decoder);
if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (!decoder->ip)
break;
decoder->state.type |= INTEL_PT_TRACE_BEGIN;
return 0;
case INTEL_PT_TIP:
decoder->pge = true;
if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (!decoder->ip)
break;
return 0;
case INTEL_PT_FUP:
if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (decoder->ip)
return 0;
break;
case INTEL_PT_MTC:
intel_pt_calc_mtc_timestamp(decoder);
break;
case INTEL_PT_TSC:
intel_pt_calc_tsc_timestamp(decoder);
break;
case INTEL_PT_TMA:
intel_pt_calc_tma(decoder);
break;
case INTEL_PT_CYC:
intel_pt_calc_cyc_timestamp(decoder);
break;
case INTEL_PT_CBR:
intel_pt_calc_cbr(decoder);
break;
case INTEL_PT_PIP:
intel_pt_set_pip(decoder);
break;
case INTEL_PT_MODE_EXEC:
intel_pt_mode_exec_status(decoder);
break;
case INTEL_PT_MODE_TSX:
intel_pt_update_in_tx(decoder);
break;
case INTEL_PT_OVF:
return intel_pt_overflow(decoder);
case INTEL_PT_BAD: /* Does not happen */
return intel_pt_bug(decoder);
case INTEL_PT_TRACESTOP:
decoder->pge = false;
decoder->continuous_period = false;
intel_pt_clear_tx_flags(decoder);
decoder->have_tma = false;
break;
case INTEL_PT_PSB:
decoder->state.psb_offset = decoder->pos;
decoder->psb_ip = 0;
decoder->last_ip = 0;
decoder->have_last_ip = true;
intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psb(decoder);
if (err)
return err;
decoder->state.type = INTEL_PT_PSB_EVT;
decoder->state.from_ip = decoder->psb_ip;
decoder->state.to_ip = 0;
return 0;
case INTEL_PT_TNT:
case INTEL_PT_PSBEND:
case INTEL_PT_VMCS:
case INTEL_PT_MNT:
case INTEL_PT_PAD:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_BBP:
case INTEL_PT_BIP:
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
default:
break;
}
}
}
static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
{
int err;
intel_pt_clear_fup_event(decoder);
decoder->overflow = false;
if (!decoder->branch_enable) {
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->state.type = 0; /* Do not have a sample */
return 0;
}
intel_pt_log("Scanning for full IP\n");
err = intel_pt_walk_to_ip(decoder);
if (err || ((decoder->state.type & INTEL_PT_PSB_EVT) && !decoder->ip))
return err;
/* In hop mode, resample to get the to_ip as an "instruction" sample */
if (decoder->hop)
decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->state.from_ip = 0;
decoder->state.to_ip = decoder->ip;
intel_pt_log_to("Setting IP", decoder->ip);
return 0;
}
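/*
 * Check whether the tail of the current buffer could be the first part of a
 * PSB packet that is split across a buffer boundary. Returns the number of
 * PSB bytes already present at the end of the buffer, or zero.
 */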
static int intel_pt_part_psb(struct intel_pt_decoder *decoder)
{
const unsigned char *end = decoder->buf + decoder->len;
size_t i;
for (i = INTEL_PT_PSB_LEN - 1; i; i--) {
if (i > decoder->len)
continue;
if (!memcmp(end - i, INTEL_PT_PSB_STR, i))
return i;
}
return 0;
}
static int intel_pt_rest_psb(struct intel_pt_decoder *decoder, int part_psb)
{
size_t rest_psb = INTEL_PT_PSB_LEN - part_psb;
const char *psb = INTEL_PT_PSB_STR;
if (rest_psb > decoder->len ||
memcmp(decoder->buf, psb + part_psb, rest_psb))
return 0;
return rest_psb;
}
static int intel_pt_get_split_psb(struct intel_pt_decoder *decoder,
int part_psb)
{
int rest_psb, ret;
decoder->pos += decoder->len;
decoder->len = 0;
ret = intel_pt_get_next_data(decoder, false);
if (ret)
return ret;
rest_psb = intel_pt_rest_psb(decoder, part_psb);
if (!rest_psb)
return 0;
decoder->pos -= part_psb;
decoder->next_buf = decoder->buf + rest_psb;
decoder->next_len = decoder->len - rest_psb;
memcpy(decoder->temp_buf, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
decoder->buf = decoder->temp_buf;
decoder->len = INTEL_PT_PSB_LEN;
return 0;
}
static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder)
{
unsigned char *next;
int ret;
intel_pt_log("Scanning for PSB\n");
while (1) {
if (!decoder->len) {
ret = intel_pt_get_next_data(decoder, false);
if (ret)
return ret;
}
next = memmem(decoder->buf, decoder->len, INTEL_PT_PSB_STR,
INTEL_PT_PSB_LEN);
if (!next) {
int part_psb;
part_psb = intel_pt_part_psb(decoder);
if (part_psb) {
ret = intel_pt_get_split_psb(decoder, part_psb);
if (ret)
return ret;
} else {
decoder->pos += decoder->len;
decoder->len = 0;
}
continue;
}
decoder->pkt_step = next - decoder->buf;
return intel_pt_get_next_packet(decoder);
}
}
static int intel_pt_sync(struct intel_pt_decoder *decoder)
{
int err;
decoder->pge = false;
decoder->continuous_period = false;
decoder->have_last_ip = false;
decoder->last_ip = 0;
decoder->psb_ip = 0;
decoder->ip = 0;
intel_pt_clear_stack(&decoder->stack);
err = intel_pt_scan_for_psb(decoder);
if (err)
return err;
if (decoder->vm_time_correlation) {
decoder->in_psb = true;
if (!decoder->timestamp)
decoder->timestamp = 1;
decoder->state.type = 0;
decoder->pkt_state = INTEL_PT_STATE_VM_TIME_CORRELATION;
return 0;
}
decoder->have_last_ip = true;
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
err = intel_pt_walk_psb(decoder);
if (err)
return err;
decoder->state.type = INTEL_PT_PSB_EVT; /* Only PSB sample */
decoder->state.from_ip = decoder->psb_ip;
decoder->state.to_ip = 0;
if (decoder->ip) {
/*
* In hop mode, resample to get the PSB FUP ip as an
* "instruction" sample.
*/
if (decoder->hop)
decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
}
return 0;
}
static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
{
uint64_t est = decoder->sample_insn_cnt << 1;
if (!decoder->cbr || !decoder->max_non_turbo_ratio)
goto out;
est *= decoder->max_non_turbo_ratio;
est /= decoder->cbr;
out:
return decoder->sample_timestamp + est;
}
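/*
 * Illustrative sketch (not part of the original file): the estimate above
 * appears to assume roughly 2 cycles per instruction and converts core
 * cycles to TSC ticks using max_non_turbo_ratio / cbr. All numbers below
 * are hypothetical.
 */
static inline uint64_t intel_pt_est_timestamp_example(void)
{
	uint64_t sample_timestamp = 1000000;	/* last known timestamp */
	uint64_t sample_insn_cnt = 500;		/* instructions since then */
	uint64_t max_non_turbo_ratio = 24;	/* e.g. 2.4 GHz base clock */
	uint64_t cbr = 12;			/* core currently at half speed */
	uint64_t est = sample_insn_cnt << 1;	/* 1000 */

	est *= max_non_turbo_ratio;		/* 24000 */
	est /= cbr;				/* 2000 */
	return sample_timestamp + est;		/* 1002000 */
}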
const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
{
int err;
do {
decoder->state.type = INTEL_PT_BRANCH;
decoder->state.flags = 0;
switch (decoder->pkt_state) {
case INTEL_PT_STATE_NO_PSB:
err = intel_pt_sync(decoder);
break;
case INTEL_PT_STATE_NO_IP:
decoder->have_last_ip = false;
decoder->last_ip = 0;
decoder->ip = 0;
fallthrough;
case INTEL_PT_STATE_ERR_RESYNC:
err = intel_pt_sync_ip(decoder);
break;
case INTEL_PT_STATE_IN_SYNC:
err = intel_pt_walk_trace(decoder);
break;
case INTEL_PT_STATE_TNT:
case INTEL_PT_STATE_TNT_CONT:
err = intel_pt_walk_tnt(decoder);
if (err == -EAGAIN)
err = intel_pt_walk_trace(decoder);
break;
case INTEL_PT_STATE_TIP:
case INTEL_PT_STATE_TIP_PGD:
err = intel_pt_walk_tip(decoder);
break;
case INTEL_PT_STATE_FUP:
err = intel_pt_walk_fup(decoder);
if (err == -EAGAIN)
err = intel_pt_walk_fup_tip(decoder);
break;
case INTEL_PT_STATE_FUP_NO_TIP:
err = intel_pt_walk_fup(decoder);
if (err == -EAGAIN)
err = intel_pt_walk_trace(decoder);
break;
case INTEL_PT_STATE_FUP_IN_PSB:
err = intel_pt_fup_in_psb(decoder);
break;
case INTEL_PT_STATE_RESAMPLE:
err = intel_pt_resample(decoder);
break;
case INTEL_PT_STATE_VM_TIME_CORRELATION:
err = intel_pt_vm_time_correlation(decoder);
break;
default:
err = intel_pt_bug(decoder);
break;
}
} while (err == -ENOLINK);
if (err) {
decoder->state.err = intel_pt_ext_err(err);
if (err != -EOVERFLOW)
decoder->state.from_ip = decoder->ip;
intel_pt_update_sample_time(decoder);
decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
intel_pt_set_nr(decoder);
} else {
decoder->state.err = 0;
if (decoder->cbr != decoder->cbr_seen) {
decoder->cbr_seen = decoder->cbr;
if (!decoder->state.type) {
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
}
decoder->state.type |= INTEL_PT_CBR_CHG;
decoder->state.cbr_payload = decoder->cbr_payload;
decoder->state.cbr = decoder->cbr;
}
if (intel_pt_sample_time(decoder->pkt_state)) {
intel_pt_update_sample_time(decoder);
if (decoder->sample_cyc) {
decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
decoder->sample_cyc = false;
}
}
/*
* When using only TSC/MTC to compute cycles, IPC can be
* sampled as soon as the cycle count changes.
*/
if (!decoder->have_cyc)
decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
}
/* Let PSB event always have TSC timestamp */
if ((decoder->state.type & INTEL_PT_PSB_EVT) && decoder->tsc_timestamp)
decoder->sample_timestamp = decoder->tsc_timestamp;
decoder->state.from_nr = decoder->nr;
decoder->state.to_nr = decoder->next_nr;
decoder->nr = decoder->next_nr;
decoder->state.timestamp = decoder->sample_timestamp;
decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
decoder->state.tot_cyc_cnt = decoder->sample_tot_cyc_cnt;
return &decoder->state;
}
/**
* intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
* @buf: pointer to buffer pointer
* @len: size of buffer
*
* Updates the buffer pointer to point to the start of the next PSB packet if
* there is one, otherwise the buffer pointer is unchanged. If @buf is updated,
* @len is adjusted accordingly.
*
* Return: %true if a PSB packet is found, %false otherwise.
*/
static bool intel_pt_next_psb(unsigned char **buf, size_t *len)
{
unsigned char *next;
next = memmem(*buf, *len, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
if (next) {
*len -= next - *buf;
*buf = next;
return true;
}
return false;
}
/**
* intel_pt_step_psb - move buffer pointer to the start of the following PSB
* packet.
* @buf: pointer to buffer pointer
* @len: size of buffer
*
* Updates the buffer pointer to point to the start of the following PSB packet
* (skipping the PSB at @buf itself) if there is one, otherwise the buffer
* pointer is unchanged. If @buf is updated, @len is adjusted accordingly.
*
* Return: %true if a PSB packet is found, %false otherwise.
*/
static bool intel_pt_step_psb(unsigned char **buf, size_t *len)
{
unsigned char *next;
if (!*len)
return false;
next = memmem(*buf + 1, *len - 1, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
if (next) {
*len -= next - *buf;
*buf = next;
return true;
}
return false;
}
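/*
 * Illustrative sketch (not part of the original file): counting the PSB
 * packets in a raw trace buffer with the helpers above. "buf" and "len"
 * are hypothetical arguments; the function is not called anywhere.
 */
static inline int intel_pt_count_psbs_example(unsigned char *buf, size_t len)
{
	int count = 0;

	if (!intel_pt_next_psb(&buf, &len))
		return 0;			/* no PSB in the buffer */
	count++;
	while (intel_pt_step_psb(&buf, &len))
		count++;			/* each step lands on the next PSB */
	return count;
}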
/**
* intel_pt_last_psb - find the last PSB packet in a buffer.
* @buf: buffer
* @len: size of buffer
*
* This function finds the last PSB in a buffer.
*
* Return: A pointer to the last PSB in @buf if found, %NULL otherwise.
*/
static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
{
const char *n = INTEL_PT_PSB_STR;
unsigned char *p;
size_t k;
if (len < INTEL_PT_PSB_LEN)
return NULL;
k = len - INTEL_PT_PSB_LEN + 1;
while (1) {
p = memrchr(buf, n[0], k);
if (!p)
return NULL;
if (!memcmp(p + 1, n + 1, INTEL_PT_PSB_LEN - 1))
return p;
k = p - buf;
if (!k)
return NULL;
}
}
/**
* intel_pt_next_tsc - find and return next TSC.
* @buf: buffer
* @len: size of buffer
* @tsc: TSC value returned
* @rem: returns remaining size when TSC is found
*
* Find a TSC packet in @buf and return the TSC value. This function assumes
* that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
* PSBEND packet is found.
*
* Return: %true if TSC is found, %false otherwise.
*/
static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
size_t *rem)
{
enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
struct intel_pt_pkt packet;
int ret;
while (len) {
ret = intel_pt_get_packet(buf, len, &packet, &ctx);
if (ret <= 0)
return false;
if (packet.type == INTEL_PT_TSC) {
*tsc = packet.payload;
*rem = len;
return true;
}
if (packet.type == INTEL_PT_PSBEND)
return false;
buf += ret;
len -= ret;
}
return false;
}
/**
* intel_pt_tsc_cmp - compare 7-byte TSCs.
* @tsc1: first TSC to compare
* @tsc2: second TSC to compare
*
* This function compares 7-byte TSC values allowing for the possibility that
* TSC wrapped around. Generally it is not possible to know if TSC has wrapped
* around, so for that purpose this function assumes the absolute difference is
* less than half the maximum possible difference.
*
* Return: %-1 if @tsc1 is before @tsc2, %0 if @tsc1 == @tsc2, %1 if @tsc1 is
* after @tsc2.
*/
static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
{
const uint64_t halfway = (1ULL << 55);
if (tsc1 == tsc2)
return 0;
if (tsc1 < tsc2) {
if (tsc2 - tsc1 < halfway)
return -1;
else
return 1;
} else {
if (tsc1 - tsc2 < halfway)
return 1;
else
return -1;
}
}
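/*
 * Illustrative sketch (not part of the original file): behaviour of the
 * 7-byte TSC comparison above around the wrap point. The values are
 * hypothetical; the expected results are worked out by hand.
 */
static inline void intel_pt_tsc_cmp_example(void)
{
	const uint64_t max7 = ((uint64_t)1 << 56) - 1;	/* largest 7-byte TSC */

	(void)intel_pt_tsc_cmp(100, 200);	/* -1: 100 is before 200 */
	(void)intel_pt_tsc_cmp(max7, 10);	/* -1: assumed to have wrapped */
	(void)intel_pt_tsc_cmp(10, max7);	/*  1: 10 is after the wrap */
}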
#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
/**
* adj_for_padding - adjust overlap to account for padding.
* @buf_b: second buffer
* @buf_a: first buffer
* @len_a: size of first buffer
*
* @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
* accordingly.
*
* Return: A pointer into @buf_b from where non-overlapped data starts
*/
static unsigned char *adj_for_padding(unsigned char *buf_b,
unsigned char *buf_a, size_t len_a)
{
unsigned char *p = buf_b - MAX_PADDING;
unsigned char *q = buf_a + len_a - MAX_PADDING;
int i;
for (i = MAX_PADDING; i; i--, p++, q++) {
if (*p != *q)
break;
}
return p;
}
/**
* intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
* using TSC.
* @buf_a: first buffer
* @len_a: size of first buffer
* @buf_b: second buffer
* @len_b: size of second buffer
* @consecutive: returns true if there is data in buf_b that is consecutive
* to buf_a
* @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
*
* If the trace contains TSC we can look at the last TSC of @buf_a and the
* first TSC of @buf_b in order to determine if the buffers overlap, and then
* walk forward in @buf_b until a later TSC is found. A precondition is that
* @buf_a and @buf_b are positioned at a PSB.
*
* Return: A pointer into @buf_b from where non-overlapped data starts, or
* @buf_b + @len_b if there is no non-overlapped data.
*/
static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
size_t len_a,
unsigned char *buf_b,
size_t len_b, bool *consecutive,
bool ooo_tsc)
{
uint64_t tsc_a, tsc_b;
unsigned char *p;
size_t len, rem_a, rem_b;
p = intel_pt_last_psb(buf_a, len_a);
if (!p)
return buf_b; /* No PSB in buf_a => no overlap */
len = len_a - (p - buf_a);
if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
/* The last PSB+ in buf_a is incomplete, so go back one more */
len_a -= len;
p = intel_pt_last_psb(buf_a, len_a);
if (!p)
return buf_b; /* No full PSB+ => assume no overlap */
len = len_a - (p - buf_a);
if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
return buf_b; /* No TSC in buf_a => assume no overlap */
}
while (1) {
/* Ignore PSB+ with no TSC */
if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
/* Same TSC, so buffers are consecutive */
if (!cmp && rem_b >= rem_a) {
unsigned char *start;
*consecutive = true;
start = buf_b + len_b - (rem_b - rem_a);
return adj_for_padding(start, buf_a, len_a);
}
if (cmp < 0 && !ooo_tsc)
return buf_b; /* tsc_a < tsc_b => no overlap */
}
if (!intel_pt_step_psb(&buf_b, &len_b))
return buf_b + len_b; /* No PSB in buf_b => no data */
}
}
/**
* intel_pt_find_overlap - determine start of non-overlapped trace data.
* @buf_a: first buffer
* @len_a: size of first buffer
* @buf_b: second buffer
* @len_b: size of second buffer
* @have_tsc: can use TSC packets to detect overlap
* @consecutive: returns true if there is data in buf_b that is consecutive
* to buf_a
* @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
*
* When trace samples or snapshots are recorded there is the possibility that
* the data overlaps. Note that, for the purposes of decoding, data is only
* useful if it begins with a PSB packet.
*
* Return: A pointer into @buf_b from where non-overlapped data starts, or
* @buf_b + @len_b if there is no non-overlapped data.
*/
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b,
bool have_tsc, bool *consecutive,
bool ooo_tsc)
{
unsigned char *found;
/* Buffer 'b' must start at PSB so throw away everything before that */
if (!intel_pt_next_psb(&buf_b, &len_b))
return buf_b + len_b; /* No PSB */
if (!intel_pt_next_psb(&buf_a, &len_a))
return buf_b; /* No overlap */
if (have_tsc) {
found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
consecutive, ooo_tsc);
if (found)
return found;
}
/*
* Buffer 'b' cannot end within buffer 'a' so, for comparison purposes,
* we can ignore the first part of buffer 'a'.
*/
while (len_b < len_a) {
if (!intel_pt_step_psb(&buf_a, &len_a))
return buf_b; /* No overlap */
}
/* Now len_b >= len_a */
while (1) {
/* Potential overlap so check the bytes */
found = memmem(buf_a, len_a, buf_b, len_a);
if (found) {
*consecutive = true;
return adj_for_padding(buf_b + len_a, buf_a, len_a);
}
/* Try again at next PSB in buffer 'a' */
if (!intel_pt_step_psb(&buf_a, &len_a))
return buf_b; /* No overlap */
}
}
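/*
 * Illustrative sketch (not part of the original file): typical use of
 * intel_pt_find_overlap() when stitching two snapshot buffers together.
 * The buffers and lengths are hypothetical; the return value is the number
 * of bytes in buf_b that were not already present in buf_a.
 */
static inline size_t intel_pt_overlap_example(unsigned char *buf_a, size_t len_a,
					      unsigned char *buf_b, size_t len_b,
					      bool have_tsc)
{
	bool consecutive = false;
	unsigned char *start;

	start = intel_pt_find_overlap(buf_a, len_a, buf_b, len_b, have_tsc,
				      &consecutive, false);
	/* Decode only from "start"; earlier bytes repeat data from buf_a */
	return len_b - (start - buf_b);
}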
/**
* struct fast_forward_data - data used by intel_pt_ff_cb().
* @timestamp: timestamp to fast forward towards
* @buf_timestamp: buffer timestamp of last buffer with trace data earlier than
* the fast forward timestamp.
*/
struct fast_forward_data {
uint64_t timestamp;
uint64_t buf_timestamp;
};
/**
* intel_pt_ff_cb - fast forward lookahead callback.
* @buffer: Intel PT trace buffer
* @data: opaque pointer to fast forward data (struct fast_forward_data)
*
* Determine if @buffer trace is past the fast forward timestamp.
*
* Return: 1 (stop lookahead) if @buffer trace is past the fast forward
* timestamp, and 0 otherwise.
*/
static int intel_pt_ff_cb(struct intel_pt_buffer *buffer, void *data)
{
struct fast_forward_data *d = data;
unsigned char *buf;
uint64_t tsc;
size_t rem;
size_t len;
buf = (unsigned char *)buffer->buf;
len = buffer->len;
if (!intel_pt_next_psb(&buf, &len) ||
!intel_pt_next_tsc(buf, len, &tsc, &rem))
return 0;
tsc = intel_pt_8b_tsc(tsc, buffer->ref_timestamp);
intel_pt_log("Buffer 1st timestamp " x64_fmt " ref timestamp " x64_fmt "\n",
tsc, buffer->ref_timestamp);
/*
* If the buffer contains a timestamp earlier than the fast forward
* timestamp, then record it, else stop.
*/
if (tsc < d->timestamp)
d->buf_timestamp = buffer->ref_timestamp;
else
return 1;
return 0;
}
/**
* intel_pt_fast_forward - reposition decoder forwards.
* @decoder: Intel PT decoder
* @timestamp: timestamp to fast forward towards
*
* Reposition decoder at the last PSB with a timestamp earlier than @timestamp.
*
* Return: 0 on success or negative error code on failure.
*/
int intel_pt_fast_forward(struct intel_pt_decoder *decoder, uint64_t timestamp)
{
struct fast_forward_data d = { .timestamp = timestamp };
unsigned char *buf;
size_t len;
int err;
intel_pt_log("Fast forward towards timestamp " x64_fmt "\n", timestamp);
/* Find buffer timestamp of buffer to fast forward to */
err = decoder->lookahead(decoder->data, intel_pt_ff_cb, &d);
if (err < 0)
return err;
/* Walk to buffer with same buffer timestamp */
if (d.buf_timestamp) {
do {
decoder->pos += decoder->len;
decoder->len = 0;
err = intel_pt_get_next_data(decoder, true);
/* -ENOLINK means non-consecutive trace */
if (err && err != -ENOLINK)
return err;
} while (decoder->buf_timestamp != d.buf_timestamp);
}
if (!decoder->buf)
return 0;
buf = (unsigned char *)decoder->buf;
len = decoder->len;
if (!intel_pt_next_psb(&buf, &len))
return 0;
/*
* Walk PSBs while the PSB timestamp is less than the fast forward
* timestamp.
*/
do {
uint64_t tsc;
size_t rem;
if (!intel_pt_next_tsc(buf, len, &tsc, &rem))
break;
tsc = intel_pt_8b_tsc(tsc, decoder->buf_timestamp);
/*
* A TSC packet can slip past MTC packets but, after fast
* forward, decoding starts at the TSC timestamp. That means
* the timestamps may not be exactly the same as the timestamps
* that would have been decoded without fast forward.
*/
if (tsc < timestamp) {
intel_pt_log("Fast forward to next PSB timestamp " x64_fmt "\n", tsc);
decoder->pos += decoder->len - len;
decoder->buf = buf;
decoder->len = len;
intel_pt_reposition(decoder);
} else {
break;
}
} while (intel_pt_step_psb(&buf, &len));
return 0;
}
| linux-master | tools/perf/util/intel-pt-decoder/intel-pt-decoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt_pkt_decoder.c: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
*/
#include <stdio.h>
#include <string.h>
#include <endian.h>
#include <byteswap.h>
#include <linux/compiler.h>
#include "intel-pt-pkt-decoder.h"
#define BIT(n) (1 << (n))
#define BIT63 ((uint64_t)1 << 63)
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define le16_to_cpu bswap_16
#define le32_to_cpu bswap_32
#define le64_to_cpu bswap_64
#define memcpy_le64(d, s, n) do { \
memcpy((d), (s), (n)); \
*(d) = le64_to_cpu(*(d)); \
} while (0)
#else
#define le16_to_cpu
#define le32_to_cpu
#define le64_to_cpu
#define memcpy_le64 memcpy
#endif
static const char * const packet_name[] = {
[INTEL_PT_BAD] = "Bad Packet!",
[INTEL_PT_PAD] = "PAD",
[INTEL_PT_TNT] = "TNT",
[INTEL_PT_TIP_PGD] = "TIP.PGD",
[INTEL_PT_TIP_PGE] = "TIP.PGE",
[INTEL_PT_TSC] = "TSC",
[INTEL_PT_TMA] = "TMA",
[INTEL_PT_MODE_EXEC] = "MODE.Exec",
[INTEL_PT_MODE_TSX] = "MODE.TSX",
[INTEL_PT_MTC] = "MTC",
[INTEL_PT_TIP] = "TIP",
[INTEL_PT_FUP] = "FUP",
[INTEL_PT_CYC] = "CYC",
[INTEL_PT_VMCS] = "VMCS",
[INTEL_PT_PSB] = "PSB",
[INTEL_PT_PSBEND] = "PSBEND",
[INTEL_PT_CBR] = "CBR",
[INTEL_PT_TRACESTOP] = "TraceSTOP",
[INTEL_PT_PIP] = "PIP",
[INTEL_PT_OVF] = "OVF",
[INTEL_PT_MNT] = "MNT",
[INTEL_PT_PTWRITE] = "PTWRITE",
[INTEL_PT_PTWRITE_IP] = "PTWRITE",
[INTEL_PT_EXSTOP] = "EXSTOP",
[INTEL_PT_EXSTOP_IP] = "EXSTOP",
[INTEL_PT_MWAIT] = "MWAIT",
[INTEL_PT_PWRE] = "PWRE",
[INTEL_PT_PWRX] = "PWRX",
[INTEL_PT_BBP] = "BBP",
[INTEL_PT_BIP] = "BIP",
[INTEL_PT_BEP] = "BEP",
[INTEL_PT_BEP_IP] = "BEP",
[INTEL_PT_CFE] = "CFE",
[INTEL_PT_CFE_IP] = "CFE",
[INTEL_PT_EVD] = "EVD",
};
const char *intel_pt_pkt_name(enum intel_pt_pkt_type type)
{
return packet_name[type];
}
static int intel_pt_get_long_tnt(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
uint64_t payload;
int count;
if (len < 8)
return INTEL_PT_NEED_MORE_BYTES;
payload = le64_to_cpu(*(uint64_t *)buf);
for (count = 47; count; count--) {
if (payload & BIT63)
break;
payload <<= 1;
}
packet->type = INTEL_PT_TNT;
packet->count = count;
packet->payload = payload << 1;
return 8;
}
static int intel_pt_get_pip(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
uint64_t payload = 0;
if (len < 8)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_PIP;
memcpy_le64(&payload, buf + 2, 6);
packet->payload = payload;
return 8;
}
static int intel_pt_get_tracestop(struct intel_pt_pkt *packet)
{
packet->type = INTEL_PT_TRACESTOP;
return 2;
}
static int intel_pt_get_cbr(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 4)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_CBR;
packet->payload = le16_to_cpu(*(uint16_t *)(buf + 2));
return 4;
}
static int intel_pt_get_vmcs(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
unsigned int count = (52 - 5) >> 3;
if (count < 1 || count > 7)
return INTEL_PT_BAD_PACKET;
if (len < count + 2)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_VMCS;
packet->count = count;
memcpy_le64(&packet->payload, buf + 2, count);
return count + 2;
}
static int intel_pt_get_ovf(struct intel_pt_pkt *packet)
{
packet->type = INTEL_PT_OVF;
return 2;
}
static int intel_pt_get_psb(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
int i;
if (len < 16)
return INTEL_PT_NEED_MORE_BYTES;
for (i = 2; i < 16; i += 2) {
if (buf[i] != 2 || buf[i + 1] != 0x82)
return INTEL_PT_BAD_PACKET;
}
packet->type = INTEL_PT_PSB;
return 16;
}
static int intel_pt_get_psbend(struct intel_pt_pkt *packet)
{
packet->type = INTEL_PT_PSBEND;
return 2;
}
static int intel_pt_get_tma(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 7)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_TMA;
packet->payload = buf[2] | (buf[3] << 8);
packet->count = buf[5] | ((buf[6] & BIT(0)) << 8);
return 7;
}
static int intel_pt_get_pad(struct intel_pt_pkt *packet)
{
packet->type = INTEL_PT_PAD;
return 1;
}
static int intel_pt_get_mnt(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 11)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_MNT;
memcpy_le64(&packet->payload, buf + 3, 8);
return 11;
}
static int intel_pt_get_3byte(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 3)
return INTEL_PT_NEED_MORE_BYTES;
switch (buf[2]) {
case 0x88: /* MNT */
return intel_pt_get_mnt(buf, len, packet);
default:
return INTEL_PT_BAD_PACKET;
}
}
static int intel_pt_get_ptwrite(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
packet->count = (buf[1] >> 5) & 0x3;
packet->type = buf[1] & BIT(7) ? INTEL_PT_PTWRITE_IP :
INTEL_PT_PTWRITE;
switch (packet->count) {
case 0:
if (len < 6)
return INTEL_PT_NEED_MORE_BYTES;
packet->payload = le32_to_cpu(*(uint32_t *)(buf + 2));
return 6;
case 1:
if (len < 10)
return INTEL_PT_NEED_MORE_BYTES;
packet->payload = le64_to_cpu(*(uint64_t *)(buf + 2));
return 10;
default:
return INTEL_PT_BAD_PACKET;
}
}
static int intel_pt_get_exstop(struct intel_pt_pkt *packet)
{
packet->type = INTEL_PT_EXSTOP;
return 2;
}
static int intel_pt_get_exstop_ip(struct intel_pt_pkt *packet)
{
packet->type = INTEL_PT_EXSTOP_IP;
return 2;
}
static int intel_pt_get_mwait(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 10)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_MWAIT;
packet->payload = le64_to_cpu(*(uint64_t *)(buf + 2));
return 10;
}
static int intel_pt_get_pwre(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 4)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_PWRE;
memcpy_le64(&packet->payload, buf + 2, 2);
return 4;
}
static int intel_pt_get_pwrx(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 7)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_PWRX;
memcpy_le64(&packet->payload, buf + 2, 5);
return 7;
}
static int intel_pt_get_bbp(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 3)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_BBP;
packet->count = buf[2] >> 7;
packet->payload = buf[2] & 0x1f;
return 3;
}
static int intel_pt_get_bip_4(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 5)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_BIP;
packet->count = buf[0] >> 3;
memcpy_le64(&packet->payload, buf + 1, 4);
return 5;
}
static int intel_pt_get_bip_8(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 9)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_BIP;
packet->count = buf[0] >> 3;
memcpy_le64(&packet->payload, buf + 1, 8);
return 9;
}
static int intel_pt_get_bep(size_t len, struct intel_pt_pkt *packet)
{
if (len < 2)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_BEP;
return 2;
}
static int intel_pt_get_bep_ip(size_t len, struct intel_pt_pkt *packet)
{
if (len < 2)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_BEP_IP;
return 2;
}
static int intel_pt_get_cfe(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 4)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = buf[2] & 0x80 ? INTEL_PT_CFE_IP : INTEL_PT_CFE;
packet->count = buf[2] & 0x1f;
packet->payload = buf[3];
return 4;
}
static int intel_pt_get_evd(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 11)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_EVD;
packet->count = buf[2] & 0x3f;
packet->payload = buf[3];
memcpy_le64(&packet->payload, buf + 3, 8);
return 11;
}
static int intel_pt_get_ext(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 2)
return INTEL_PT_NEED_MORE_BYTES;
if ((buf[1] & 0x1f) == 0x12)
return intel_pt_get_ptwrite(buf, len, packet);
switch (buf[1]) {
case 0xa3: /* Long TNT */
return intel_pt_get_long_tnt(buf, len, packet);
case 0x43: /* PIP */
return intel_pt_get_pip(buf, len, packet);
case 0x83: /* TraceStop */
return intel_pt_get_tracestop(packet);
case 0x03: /* CBR */
return intel_pt_get_cbr(buf, len, packet);
case 0xc8: /* VMCS */
return intel_pt_get_vmcs(buf, len, packet);
case 0xf3: /* OVF */
return intel_pt_get_ovf(packet);
case 0x82: /* PSB */
return intel_pt_get_psb(buf, len, packet);
case 0x23: /* PSBEND */
return intel_pt_get_psbend(packet);
case 0x73: /* TMA */
return intel_pt_get_tma(buf, len, packet);
case 0xC3: /* 3-byte header */
return intel_pt_get_3byte(buf, len, packet);
case 0x62: /* EXSTOP no IP */
return intel_pt_get_exstop(packet);
case 0xE2: /* EXSTOP with IP */
return intel_pt_get_exstop_ip(packet);
case 0xC2: /* MWAIT */
return intel_pt_get_mwait(buf, len, packet);
case 0x22: /* PWRE */
return intel_pt_get_pwre(buf, len, packet);
case 0xA2: /* PWRX */
return intel_pt_get_pwrx(buf, len, packet);
case 0x63: /* BBP */
return intel_pt_get_bbp(buf, len, packet);
case 0x33: /* BEP no IP */
return intel_pt_get_bep(len, packet);
case 0xb3: /* BEP with IP */
return intel_pt_get_bep_ip(len, packet);
case 0x13: /* CFE */
return intel_pt_get_cfe(buf, len, packet);
case 0x53: /* EVD */
return intel_pt_get_evd(buf, len, packet);
default:
return INTEL_PT_BAD_PACKET;
}
}
static int intel_pt_get_short_tnt(unsigned int byte,
struct intel_pt_pkt *packet)
{
int count;
for (count = 6; count; count--) {
if (byte & BIT(7))
break;
byte <<= 1;
}
packet->type = INTEL_PT_TNT;
packet->count = count;
packet->payload = (uint64_t)byte << 57;
return 1;
}
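/*
 * Illustrative sketch (not part of the original file): hand-decoding a short
 * TNT byte with the helper above. 0x1a encodes three Taken/Not-taken bits
 * (1, 0, 1) below a stop bit; the expected results are worked out by hand.
 */
static inline void intel_pt_short_tnt_example(void)
{
	struct intel_pt_pkt pkt;

	intel_pt_get_short_tnt(0x1a, &pkt);
	/* pkt.type == INTEL_PT_TNT, pkt.count == 3 */
	/* pkt.payload has the three bits left-aligned: bits 63..61 are 1,0,1 */
	(void)pkt;
}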
static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
size_t len, struct intel_pt_pkt *packet)
{
unsigned int offs = 1, shift;
uint64_t payload = byte >> 3;
byte >>= 2;
len -= 1;
for (shift = 5; byte & 1; shift += 7) {
if (offs > 9)
return INTEL_PT_BAD_PACKET;
if (len < offs)
return INTEL_PT_NEED_MORE_BYTES;
byte = buf[offs++];
payload |= ((uint64_t)byte >> 1) << shift;
}
packet->type = INTEL_PT_CYC;
packet->payload = payload;
return offs;
}
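/*
 * Illustrative sketch (not part of the original file): hand-decoding a
 * two-byte CYC packet with the helper above. The bytes are made up to encode
 * a cycle count of 300: 5 low bits in the first byte, then 7 bits per
 * extension byte while the extension bit is set.
 */
static inline void intel_pt_cyc_example(void)
{
	/* 0x67 = count bits 01100, extension bit set, CYC header bits 11 */
	/* 0x12 = count bits 0001001, extension bit clear                 */
	static const unsigned char cyc[] = { 0x67, 0x12 };
	struct intel_pt_pkt pkt;
	int ret;

	ret = intel_pt_get_cyc(cyc[0], cyc, sizeof(cyc), &pkt);
	/* ret == 2 bytes consumed, pkt.type == INTEL_PT_CYC, pkt.payload == 300 */
	(void)ret;
	(void)pkt;
}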
static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte,
const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
int ip_len;
packet->count = byte >> 5;
switch (packet->count) {
case 0:
ip_len = 0;
break;
case 1:
if (len < 3)
return INTEL_PT_NEED_MORE_BYTES;
ip_len = 2;
packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
break;
case 2:
if (len < 5)
return INTEL_PT_NEED_MORE_BYTES;
ip_len = 4;
packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1));
break;
case 3:
case 4:
if (len < 7)
return INTEL_PT_NEED_MORE_BYTES;
ip_len = 6;
memcpy_le64(&packet->payload, buf + 1, 6);
break;
case 6:
if (len < 9)
return INTEL_PT_NEED_MORE_BYTES;
ip_len = 8;
packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1));
break;
default:
return INTEL_PT_BAD_PACKET;
}
packet->type = type;
return ip_len + 1;
}
static int intel_pt_get_mode(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 2)
return INTEL_PT_NEED_MORE_BYTES;
switch (buf[1] >> 5) {
case 0:
packet->type = INTEL_PT_MODE_EXEC;
packet->count = buf[1];
switch (buf[1] & 3) {
case 0:
packet->payload = 16;
break;
case 1:
packet->payload = 64;
break;
case 2:
packet->payload = 32;
break;
default:
return INTEL_PT_BAD_PACKET;
}
break;
case 1:
packet->type = INTEL_PT_MODE_TSX;
if ((buf[1] & 3) == 3)
return INTEL_PT_BAD_PACKET;
packet->payload = buf[1] & 3;
break;
default:
return INTEL_PT_BAD_PACKET;
}
return 2;
}
static int intel_pt_get_tsc(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 8)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_TSC;
memcpy_le64(&packet->payload, buf + 1, 7);
return 8;
}
static int intel_pt_get_mtc(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 2)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_MTC;
packet->payload = buf[1];
return 2;
}
static int intel_pt_do_get_packet(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet,
enum intel_pt_pkt_ctx ctx)
{
unsigned int byte;
memset(packet, 0, sizeof(struct intel_pt_pkt));
if (!len)
return INTEL_PT_NEED_MORE_BYTES;
byte = buf[0];
switch (ctx) {
case INTEL_PT_NO_CTX:
break;
case INTEL_PT_BLK_4_CTX:
if ((byte & 0x7) == 4)
return intel_pt_get_bip_4(buf, len, packet);
break;
case INTEL_PT_BLK_8_CTX:
if ((byte & 0x7) == 4)
return intel_pt_get_bip_8(buf, len, packet);
break;
default:
break;
}
if (!(byte & BIT(0))) {
if (byte == 0)
return intel_pt_get_pad(packet);
if (byte == 2)
return intel_pt_get_ext(buf, len, packet);
return intel_pt_get_short_tnt(byte, packet);
}
if ((byte & 2))
return intel_pt_get_cyc(byte, buf, len, packet);
switch (byte & 0x1f) {
case 0x0D:
return intel_pt_get_ip(INTEL_PT_TIP, byte, buf, len, packet);
case 0x11:
return intel_pt_get_ip(INTEL_PT_TIP_PGE, byte, buf, len,
packet);
case 0x01:
return intel_pt_get_ip(INTEL_PT_TIP_PGD, byte, buf, len,
packet);
case 0x1D:
return intel_pt_get_ip(INTEL_PT_FUP, byte, buf, len, packet);
case 0x19:
switch (byte) {
case 0x99:
return intel_pt_get_mode(buf, len, packet);
case 0x19:
return intel_pt_get_tsc(buf, len, packet);
case 0x59:
return intel_pt_get_mtc(buf, len, packet);
default:
return INTEL_PT_BAD_PACKET;
}
default:
return INTEL_PT_BAD_PACKET;
}
}
void intel_pt_upd_pkt_ctx(const struct intel_pt_pkt *packet,
enum intel_pt_pkt_ctx *ctx)
{
switch (packet->type) {
case INTEL_PT_BAD:
case INTEL_PT_PAD:
case INTEL_PT_TSC:
case INTEL_PT_TMA:
case INTEL_PT_MTC:
case INTEL_PT_FUP:
case INTEL_PT_CYC:
case INTEL_PT_CBR:
case INTEL_PT_MNT:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_BIP:
break;
case INTEL_PT_TNT:
case INTEL_PT_TIP:
case INTEL_PT_TIP_PGD:
case INTEL_PT_TIP_PGE:
case INTEL_PT_MODE_EXEC:
case INTEL_PT_MODE_TSX:
case INTEL_PT_PIP:
case INTEL_PT_OVF:
case INTEL_PT_VMCS:
case INTEL_PT_TRACESTOP:
case INTEL_PT_PSB:
case INTEL_PT_PSBEND:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_BEP:
case INTEL_PT_BEP_IP:
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
case INTEL_PT_EVD:
*ctx = INTEL_PT_NO_CTX;
break;
case INTEL_PT_BBP:
if (packet->count)
*ctx = INTEL_PT_BLK_4_CTX;
else
*ctx = INTEL_PT_BLK_8_CTX;
break;
default:
break;
}
}
int intel_pt_get_packet(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet, enum intel_pt_pkt_ctx *ctx)
{
int ret;
ret = intel_pt_do_get_packet(buf, len, packet, *ctx);
if (ret > 0) {
while (ret < 8 && len > (size_t)ret && !buf[ret])
ret += 1;
intel_pt_upd_pkt_ctx(packet, ctx);
}
return ret;
}
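/*
 * Illustrative sketch (not part of the original file): decoding a raw trace
 * buffer one packet at a time with intel_pt_get_packet(). "buf" and "len"
 * are hypothetical; a real caller also refills the buffer when it runs out
 * of bytes instead of simply stopping.
 */
static inline void intel_pt_decode_buffer_example(const unsigned char *buf,
						  size_t len)
{
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
	struct intel_pt_pkt packet;
	int ret;

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		if (ret <= 0)
			break;		/* bad packet or more bytes needed */
		/* "packet" now describes one packet; ret is its size in bytes */
		buf += ret;
		len -= ret;
	}
}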
int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf,
size_t buf_len)
{
int ret, i, nr;
unsigned long long payload = packet->payload;
const char *name = intel_pt_pkt_name(packet->type);
switch (packet->type) {
case INTEL_PT_BAD:
case INTEL_PT_PAD:
case INTEL_PT_PSB:
case INTEL_PT_PSBEND:
case INTEL_PT_TRACESTOP:
case INTEL_PT_OVF:
return snprintf(buf, buf_len, "%s", name);
case INTEL_PT_TNT: {
size_t blen = buf_len;
ret = snprintf(buf, blen, "%s ", name);
if (ret < 0)
return ret;
buf += ret;
blen -= ret;
for (i = 0; i < packet->count; i++) {
if (payload & BIT63)
ret = snprintf(buf, blen, "T");
else
ret = snprintf(buf, blen, "N");
if (ret < 0)
return ret;
buf += ret;
blen -= ret;
payload <<= 1;
}
ret = snprintf(buf, blen, " (%d)", packet->count);
if (ret < 0)
return ret;
blen -= ret;
return buf_len - blen;
}
case INTEL_PT_TIP_PGD:
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
case INTEL_PT_FUP:
if (!(packet->count))
return snprintf(buf, buf_len, "%s no ip", name);
fallthrough;
case INTEL_PT_CYC:
case INTEL_PT_VMCS:
case INTEL_PT_MTC:
case INTEL_PT_MNT:
case INTEL_PT_CBR:
case INTEL_PT_TSC:
return snprintf(buf, buf_len, "%s 0x%llx", name, payload);
case INTEL_PT_TMA:
return snprintf(buf, buf_len, "%s CTC 0x%x FC 0x%x", name,
(unsigned)payload, packet->count);
case INTEL_PT_MODE_EXEC:
return snprintf(buf, buf_len, "%s IF:%d %lld",
name, !!(packet->count & 4), payload);
case INTEL_PT_MODE_TSX:
return snprintf(buf, buf_len, "%s TXAbort:%u InTX:%u",
name, (unsigned)(payload >> 1) & 1,
(unsigned)payload & 1);
case INTEL_PT_PIP:
nr = packet->payload & INTEL_PT_VMX_NR_FLAG ? 1 : 0;
payload &= ~INTEL_PT_VMX_NR_FLAG;
ret = snprintf(buf, buf_len, "%s 0x%llx (NR=%d)",
name, payload >> 1, nr);
return ret;
case INTEL_PT_PTWRITE:
return snprintf(buf, buf_len, "%s 0x%llx IP:0", name, payload);
case INTEL_PT_PTWRITE_IP:
return snprintf(buf, buf_len, "%s 0x%llx IP:1", name, payload);
case INTEL_PT_BEP:
case INTEL_PT_EXSTOP:
return snprintf(buf, buf_len, "%s IP:0", name);
case INTEL_PT_BEP_IP:
case INTEL_PT_EXSTOP_IP:
return snprintf(buf, buf_len, "%s IP:1", name);
case INTEL_PT_MWAIT:
return snprintf(buf, buf_len, "%s 0x%llx Hints 0x%x Extensions 0x%x",
name, payload, (unsigned int)(payload & 0xff),
(unsigned int)((payload >> 32) & 0x3));
case INTEL_PT_PWRE:
return snprintf(buf, buf_len, "%s 0x%llx HW:%u CState:%u Sub-CState:%u",
name, payload, !!(payload & 0x80),
(unsigned int)((payload >> 12) & 0xf),
(unsigned int)((payload >> 8) & 0xf));
case INTEL_PT_PWRX:
return snprintf(buf, buf_len, "%s 0x%llx Last CState:%u Deepest CState:%u Wake Reason 0x%x",
name, payload,
(unsigned int)((payload >> 4) & 0xf),
(unsigned int)(payload & 0xf),
(unsigned int)((payload >> 8) & 0xf));
case INTEL_PT_BBP:
return snprintf(buf, buf_len, "%s SZ %s-byte Type 0x%llx",
name, packet->count ? "4" : "8", payload);
case INTEL_PT_BIP:
return snprintf(buf, buf_len, "%s ID 0x%02x Value 0x%llx",
name, packet->count, payload);
case INTEL_PT_CFE:
case INTEL_PT_CFE_IP:
return snprintf(buf, buf_len, "%s IP:%d Type 0x%02x Vector 0x%llx",
name, packet->type == INTEL_PT_CFE_IP, packet->count, payload);
case INTEL_PT_EVD:
return snprintf(buf, buf_len, "%s Type 0x%02x Payload 0x%llx",
name, packet->count, payload);
default:
break;
}
return snprintf(buf, buf_len, "%s 0x%llx (%d)",
name, payload, packet->count);
}
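/*
 * Illustrative sketch (not part of the original file): pretty-printing a
 * packet with intel_pt_pkt_desc(). INTEL_PT_PKT_DESC_MAX comes from
 * intel-pt-pkt-decoder.h; the TSC payload is a made-up value.
 */
static inline void intel_pt_pkt_desc_example(void)
{
	struct intel_pt_pkt pkt = { .type = INTEL_PT_TSC, .payload = 0x1234 };
	char desc[INTEL_PT_PKT_DESC_MAX];

	intel_pt_pkt_desc(&pkt, desc, sizeof(desc));
	/* desc now contains "TSC 0x1234" */
	(void)desc;
}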
| linux-master | tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt_log.c: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>
#include <linux/zalloc.h>
#include <linux/kernel.h>
#include "intel-pt-log.h"
#include "intel-pt-insn-decoder.h"
#include "intel-pt-pkt-decoder.h"
#define MAX_LOG_NAME 256
#define DFLT_BUF_SZ (16 * 1024)
struct log_buf {
char *buf;
size_t buf_sz;
size_t head;
bool wrapped;
FILE *backend;
};
static FILE *f;
static char log_name[MAX_LOG_NAME];
bool intel_pt_enable_logging;
static bool intel_pt_dump_log_on_error;
static unsigned int intel_pt_log_on_error_size;
static struct log_buf log_buf;
void *intel_pt_log_fp(void)
{
return f;
}
void intel_pt_log_enable(bool dump_log_on_error, unsigned int log_on_error_size)
{
intel_pt_enable_logging = true;
intel_pt_dump_log_on_error = dump_log_on_error;
intel_pt_log_on_error_size = log_on_error_size;
}
void intel_pt_log_disable(void)
{
if (f)
fflush(f);
intel_pt_enable_logging = false;
}
void intel_pt_log_set_name(const char *name)
{
strncpy(log_name, name, MAX_LOG_NAME - 5);
strcat(log_name, ".log");
}
static void intel_pt_print_data(const unsigned char *buf, int len, uint64_t pos,
int indent)
{
int i;
for (i = 0; i < indent; i++)
fprintf(f, " ");
fprintf(f, " %08" PRIx64 ": ", pos);
for (i = 0; i < len; i++)
fprintf(f, " %02x", buf[i]);
for (; i < 16; i++)
fprintf(f, " ");
fprintf(f, " ");
}
static void intel_pt_print_no_data(uint64_t pos, int indent)
{
int i;
for (i = 0; i < indent; i++)
fprintf(f, " ");
fprintf(f, " %08" PRIx64 ": ", pos);
for (i = 0; i < 16; i++)
fprintf(f, " ");
fprintf(f, " ");
}
static ssize_t log_buf__write(void *cookie, const char *buf, size_t size)
{
struct log_buf *b = cookie;
size_t sz = size;
if (!b->buf)
return size;
while (sz) {
size_t space = b->buf_sz - b->head;
size_t n = min(space, sz);
memcpy(b->buf + b->head, buf, n);
sz -= n;
buf += n;
b->head += n;
if (sz && b->head >= b->buf_sz) {
b->head = 0;
b->wrapped = true;
}
}
return size;
}
static int log_buf__close(void *cookie)
{
struct log_buf *b = cookie;
zfree(&b->buf);
return 0;
}
static FILE *log_buf__open(struct log_buf *b, FILE *backend, unsigned int sz)
{
cookie_io_functions_t fns = {
.write = log_buf__write,
.close = log_buf__close,
};
FILE *file;
memset(b, 0, sizeof(*b));
b->buf_sz = sz;
b->buf = malloc(b->buf_sz);
b->backend = backend;
file = fopencookie(b, "a", fns);
if (!file)
zfree(&b->buf);
return file;
}
static bool remove_first_line(const char **p, size_t *n)
{
for (; *n && **p != '\n'; ++*p, --*n)
;
if (*n) {
*p += 1;
*n -= 1;
return true;
}
return false;
}
static void write_lines(const char *p, size_t n, FILE *fp, bool *remove_first)
{
if (*remove_first)
*remove_first = !remove_first_line(&p, &n);
fwrite(p, n, 1, fp);
}
static void log_buf__dump(struct log_buf *b)
{
bool remove_first = false;
if (!b->buf)
return;
fflush(f); /* Could update b->head and b->wrapped */
fprintf(b->backend, "Dumping debug log buffer\n");
if (b->wrapped) {
remove_first = true;
write_lines(b->buf + b->head, b->buf_sz - b->head, b->backend, &remove_first);
}
write_lines(b->buf, b->head, b->backend, &remove_first);
fprintf(b->backend, "End of debug log buffer dump\n");
b->head = 0;
b->wrapped = false;
}
void intel_pt_log_dump_buf(void)
{
log_buf__dump(&log_buf);
}
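/*
 * Illustrative sketch (not part of the original file): behaviour of the
 * wrap-around log buffer above. A deliberately tiny 8-byte buffer wraps on a
 * 9-byte write; on dump, the partially overwritten oldest line is dropped
 * and the remaining complete lines come out oldest first. All values are
 * hypothetical.
 */
static inline void intel_pt_log_buf_example(void)
{
	struct log_buf b;
	FILE *file = log_buf__open(&b, stderr, 8);

	if (!file)
		return;
	fprintf(file, "aa\nbb\ncc\n");	/* 9 bytes: wraps, clobbering "aa" */
	fflush(file);
	/* b.head == 1, b.wrapped == true */
	log_buf__dump(&b);		/* backend gets "bb\ncc\n" between the markers */
	fclose(file);
}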
static int intel_pt_log_open(void)
{
if (!intel_pt_enable_logging)
return -1;
if (f)
return 0;
if (log_name[0])
f = fopen(log_name, "w+");
else
f = stdout;
if (f && intel_pt_dump_log_on_error)
f = log_buf__open(&log_buf, f, intel_pt_log_on_error_size);
if (!f) {
intel_pt_enable_logging = false;
return -1;
}
return 0;
}
void __intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
uint64_t pos, const unsigned char *buf)
{
char desc[INTEL_PT_PKT_DESC_MAX];
if (intel_pt_log_open())
return;
intel_pt_print_data(buf, pkt_len, pos, 0);
intel_pt_pkt_desc(packet, desc, INTEL_PT_PKT_DESC_MAX);
fprintf(f, "%s\n", desc);
}
void __intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
{
char desc[INTEL_PT_INSN_DESC_MAX];
size_t len = intel_pt_insn->length;
if (intel_pt_log_open())
return;
if (len > INTEL_PT_INSN_BUF_SZ)
len = INTEL_PT_INSN_BUF_SZ;
intel_pt_print_data(intel_pt_insn->buf, len, ip, 8);
if (intel_pt_insn_desc(intel_pt_insn, desc, INTEL_PT_INSN_DESC_MAX) > 0)
fprintf(f, "%s\n", desc);
else
fprintf(f, "Bad instruction!\n");
}
void __intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
uint64_t ip)
{
char desc[INTEL_PT_INSN_DESC_MAX];
if (intel_pt_log_open())
return;
intel_pt_print_no_data(ip, 8);
if (intel_pt_insn_desc(intel_pt_insn, desc, INTEL_PT_INSN_DESC_MAX) > 0)
fprintf(f, "%s\n", desc);
else
fprintf(f, "Bad instruction!\n");
}
void __intel_pt_log(const char *fmt, ...)
{
va_list args;
if (intel_pt_log_open())
return;
va_start(args, fmt);
vfprintf(f, fmt, args);
va_end(args);
}
| linux-master | tools/perf/util/intel-pt-decoder/intel-pt-log.c |
/*
* jvmti_agent.c: JVMTI agent interface
*
* Adapted from the Oprofile code in opagent.c:
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Copyright 2007 OProfile authors
* Jens Wilke
* Daniel Hansel
* Copyright IBM Corporation 2007
*/
#include <sys/types.h>
#include <sys/stat.h> /* for mkdir() */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <fcntl.h>
#include <unistd.h>
#include <time.h>
#include <sys/mman.h>
#include <syscall.h> /* for gettid() */
#include <err.h>
#include <linux/kernel.h>
#include "jvmti_agent.h"
#include "../util/jitdump.h"
#define JIT_LANG "java"
static char jit_path[PATH_MAX];
static void *marker_addr;
#ifndef HAVE_GETTID
static inline pid_t gettid(void)
{
return (pid_t)syscall(__NR_gettid);
}
#endif
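/*
* Read the ELF e_machine field of the running JVM from /proc/self/exe so
* that the jitdump header records the correct architecture.
*/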
static int get_e_machine(struct jitheader *hdr)
{
ssize_t sret;
char id[16];
int fd, ret = -1;
struct {
uint16_t e_type;
uint16_t e_machine;
} info;
fd = open("/proc/self/exe", O_RDONLY);
if (fd == -1)
return -1;
sret = read(fd, id, sizeof(id));
if (sret != sizeof(id))
goto error;
/* check ELF signature */
if (id[0] != 0x7f || id[1] != 'E' || id[2] != 'L' || id[3] != 'F')
goto error;
sret = read(fd, &info, sizeof(info));
if (sret != sizeof(info))
goto error;
hdr->elf_mach = info.e_machine;
ret = 0;
error:
close(fd);
return ret;
}
static int use_arch_timestamp;
static inline uint64_t
get_arch_timestamp(void)
{
#if defined(__i386__) || defined(__x86_64__)
unsigned int low, high;
asm volatile("rdtsc" : "=a" (low), "=d" (high));
return low | ((uint64_t)high) << 32;
#else
return 0;
#endif
}
#define NSEC_PER_SEC 1000000000
static int perf_clk_id = CLOCK_MONOTONIC;
static inline uint64_t
timespec_to_ns(const struct timespec *ts)
{
return ((uint64_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
static inline uint64_t
perf_get_timestamp(void)
{
struct timespec ts;
int ret;
if (use_arch_timestamp)
return get_arch_timestamp();
ret = clock_gettime(perf_clk_id, &ts);
if (ret)
return 0;
return timespec_to_ns(&ts);
}
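/*
* Create the jit cache directory hierarchy:
* $JITDUMPDIR (or $HOME, or '.') /.debug/jit/java-jit-<date>.XXXXXXXX
*/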
static int
create_jit_cache_dir(void)
{
char str[32];
char *base, *p;
struct tm tm;
time_t t;
int ret;
time(&t);
localtime_r(&t, &tm);
base = getenv("JITDUMPDIR");
if (!base)
base = getenv("HOME");
if (!base)
base = ".";
strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
ret = snprintf(jit_path, PATH_MAX, "%s/.debug/", base);
if (ret >= PATH_MAX) {
warnx("jvmti: cannot generate jit cache dir because %s/.debug/"
" is too long, please check the cwd, JITDUMPDIR, and"
" HOME variables", base);
return -1;
}
ret = mkdir(jit_path, 0755);
if (ret == -1) {
if (errno != EEXIST) {
warn("jvmti: cannot create jit cache dir %s", jit_path);
return -1;
}
}
ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit", base);
if (ret >= PATH_MAX) {
warnx("jvmti: cannot generate jit cache dir because"
" %s/.debug/jit is too long, please check the cwd,"
" JITDUMPDIR, and HOME variables", base);
return -1;
}
ret = mkdir(jit_path, 0755);
if (ret == -1) {
if (errno != EEXIST) {
warn("jvmti: cannot create jit cache dir %s", jit_path);
return -1;
}
}
ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit/%s.XXXXXXXX", base, str);
if (ret >= PATH_MAX) {
warnx("jvmti: cannot generate jit cache dir because"
" %s/.debug/jit/%s.XXXXXXXX is too long, please check"
" the cwd, JITDUMPDIR, and HOME variables",
base, str);
return -1;
}
p = mkdtemp(jit_path);
if (p != jit_path) {
warn("jvmti: cannot create jit cache dir %s", jit_path);
return -1;
}
return 0;
}
static int
perf_open_marker_file(int fd)
{
long pgsz;
pgsz = sysconf(_SC_PAGESIZE);
if (pgsz == -1)
return -1;
/*
* We mmap the jitdump file to create an MMAP RECORD in the perf.data file.
* The mmap is captured either live (perf record running when we mmap)
* or in deferred mode, via /proc/PID/maps.
* The MMAP record marks the jitdump file that holds additional metadata
* about the jitted code. perf report/annotate detect this special
* filename and process the jitdump file.
*
* The mapping must be PROT_EXEC to ensure it is captured by perf record
* even when the -d option is not used.
*/
marker_addr = mmap(NULL, pgsz, PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
return (marker_addr == MAP_FAILED) ? -1 : 0;
}
static void
perf_close_marker_file(void)
{
long pgsz;
if (!marker_addr)
return;
pgsz = sysconf(_SC_PAGESIZE);
if (pgsz == -1)
return;
munmap(marker_addr, pgsz);
}
static void
init_arch_timestamp(void)
{
char *str = getenv("JITDUMP_USE_ARCH_TIMESTAMP");
if (!str || !*str || !strcmp(str, "0"))
return;
use_arch_timestamp = 1;
}
void *jvmti_open(void)
{
char dump_path[PATH_MAX];
struct jitheader header;
int fd, ret;
FILE *fp;
init_arch_timestamp();
/*
* check if clockid is supported
*/
if (!perf_get_timestamp()) {
if (use_arch_timestamp)
warnx("jvmti: arch timestamp not supported");
else
warnx("jvmti: kernel does not support %d clock id", perf_clk_id);
}
memset(&header, 0, sizeof(header));
/*
* jitdump file dir
*/
if (create_jit_cache_dir() < 0)
return NULL;
/*
* jitdump file name
*/
ret = snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
if (ret >= PATH_MAX) {
warnx("jvmti: cannot generate jitdump file full path because"
" %s/jit-%i.dump is too long, please check the cwd,"
" JITDUMPDIR, and HOME variables", jit_path, getpid());
return NULL;
}
fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
if (fd == -1)
return NULL;
/*
* create perf.data marker for the jitdump file
*/
if (perf_open_marker_file(fd)) {
warnx("jvmti: failed to create marker file");
close(fd);
return NULL;
}
fp = fdopen(fd, "w+");
if (!fp) {
warn("jvmti: cannot create %s", dump_path);
close(fd);
return NULL;
}
warnx("jvmti: jitdump in %s", dump_path);
if (get_e_machine(&header)) {
warn("get_e_machine failed\n");
goto error;
}
header.magic = JITHEADER_MAGIC;
header.version = JITHEADER_VERSION;
header.total_size = sizeof(header);
header.pid = getpid();
header.timestamp = perf_get_timestamp();
if (use_arch_timestamp)
header.flags |= JITDUMP_FLAGS_ARCH_TIMESTAMP;
if (!fwrite(&header, sizeof(header), 1, fp)) {
warn("jvmti: cannot write dumpfile header");
goto error;
}
return fp;
error:
fclose(fp);
return NULL;
}
int
jvmti_close(void *agent)
{
struct jr_code_close rec;
FILE *fp = agent;
if (!fp) {
warnx("jvmti: invalid fd in close_agent");
return -1;
}
rec.p.id = JIT_CODE_CLOSE;
rec.p.total_size = sizeof(rec);
rec.p.timestamp = perf_get_timestamp();
if (!fwrite(&rec, sizeof(rec), 1, fp))
return -1;
fclose(fp);
fp = NULL;
perf_close_marker_file();
return 0;
}
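/*
* Emit a JIT_CODE_LOAD record for one jitted function: record header,
* symbol name and, if provided, the native code bytes.
*/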
int
jvmti_write_code(void *agent, char const *sym,
uint64_t vma, void const *code, unsigned int const size)
{
static int code_generation = 1;
struct jr_code_load rec;
size_t sym_len;
FILE *fp = agent;
int ret = -1;
/* don't care about 0 length function, no samples */
if (size == 0)
return 0;
if (!fp) {
warnx("jvmti: invalid fd in write_native_code");
return -1;
}
sym_len = strlen(sym) + 1;
rec.p.id = JIT_CODE_LOAD;
rec.p.total_size = sizeof(rec) + sym_len;
rec.p.timestamp = perf_get_timestamp();
rec.code_size = size;
rec.vma = vma;
rec.code_addr = vma;
rec.pid = getpid();
rec.tid = gettid();
if (code)
rec.p.total_size += size;
/*
* If JVM is multi-threaded, multiple concurrent calls to agent
* may be possible, so protect file writes
*/
flockfile(fp);
/*
* get code index inside lock to avoid race condition
*/
rec.code_index = code_generation++;
ret = fwrite_unlocked(&rec, sizeof(rec), 1, fp);
fwrite_unlocked(sym, sym_len, 1, fp);
if (code)
fwrite_unlocked(code, size, 1, fp);
funlockfile(fp);
ret = 0;
return ret;
}
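/*
* Emit a JIT_CODE_DEBUG_INFO record mapping code addresses to source lines,
* followed by one debug entry (address, line, discriminator, file name) per
* line.
*/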
int
jvmti_write_debug_info(void *agent, uint64_t code,
int nr_lines, jvmti_line_info_t *li,
const char * const * file_names)
{
struct jr_code_debug_info rec;
size_t sret, len, size, flen = 0;
uint64_t addr;
FILE *fp = agent;
int i;
/*
* no entry to write
*/
if (!nr_lines)
return 0;
if (!fp) {
warnx("jvmti: invalid fd in write_debug_info");
return -1;
}
for (i = 0; i < nr_lines; ++i) {
flen += strlen(file_names[i]) + 1;
}
rec.p.id = JIT_CODE_DEBUG_INFO;
size = sizeof(rec);
rec.p.timestamp = perf_get_timestamp();
rec.code_addr = (uint64_t)(uintptr_t)code;
rec.nr_entry = nr_lines;
/*
* on disk source line info layout:
* uint64_t : addr
* int : line number
* int : column discriminator
* file[] : source file name
*/
size += nr_lines * sizeof(struct debug_entry);
size += flen;
rec.p.total_size = size;
/*
* If JVM is multi-threaded, multiple concurrent calls to agent
* may be possible, so protect file writes
*/
flockfile(fp);
sret = fwrite_unlocked(&rec, sizeof(rec), 1, fp);
if (sret != 1)
goto error;
for (i = 0; i < nr_lines; i++) {
addr = (uint64_t)li[i].pc;
len = sizeof(addr);
sret = fwrite_unlocked(&addr, len, 1, fp);
if (sret != 1)
goto error;
len = sizeof(li[0].line_number);
sret = fwrite_unlocked(&li[i].line_number, len, 1, fp);
if (sret != 1)
goto error;
len = sizeof(li[0].discrim);
sret = fwrite_unlocked(&li[i].discrim, len, 1, fp);
if (sret != 1)
goto error;
sret = fwrite_unlocked(file_names[i], strlen(file_names[i]) + 1, 1, fp);
if (sret != 1)
goto error;
}
funlockfile(fp);
return 0;
error:
funlockfile(fp);
return -1;
}
| linux-master | tools/perf/jvmti/jvmti_agent.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/string.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <err.h>
#include <jvmti.h>
#ifdef HAVE_JVMTI_CMLR
#include <jvmticmlr.h>
#endif
#include <limits.h>
#include "jvmti_agent.h"
static int has_line_numbers;
void *jvmti_agent;
static void print_error(jvmtiEnv *jvmti, const char *msg, jvmtiError ret)
{
char *err_msg = NULL;
jvmtiError err;
err = (*jvmti)->GetErrorName(jvmti, ret, &err_msg);
if (err == JVMTI_ERROR_NONE) {
warnx("%s failed with %s", msg, err_msg);
(*jvmti)->Deallocate(jvmti, (unsigned char *)err_msg);
} else {
warnx("%s failed with an unknown error %d", msg, ret);
}
}
#ifdef HAVE_JVMTI_CMLR
static jvmtiError
do_get_line_number(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
jvmti_line_info_t *tab)
{
jint i, nr_lines = 0;
jvmtiLineNumberEntry *loc_tab = NULL;
jvmtiError ret;
jint src_line = -1;
ret = (*jvmti)->GetLineNumberTable(jvmti, m, &nr_lines, &loc_tab);
if (ret == JVMTI_ERROR_ABSENT_INFORMATION || ret == JVMTI_ERROR_NATIVE_METHOD) {
/* No debug information for this method */
return ret;
} else if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "GetLineNumberTable", ret);
return ret;
}
for (i = 0; i < nr_lines && loc_tab[i].start_location <= bci; i++) {
src_line = i;
}
if (src_line != -1) {
tab->pc = (unsigned long)pc;
tab->line_number = loc_tab[src_line].line_number;
tab->discrim = 0; /* not yet used */
tab->methodID = m;
ret = JVMTI_ERROR_NONE;
} else {
ret = JVMTI_ERROR_ABSENT_INFORMATION;
}
(*jvmti)->Deallocate(jvmti, (unsigned char *)loc_tab);
return ret;
}
static jvmtiError
get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **tab, int *nr_lines)
{
const jvmtiCompiledMethodLoadRecordHeader *hdr;
jvmtiCompiledMethodLoadInlineRecord *rec;
PCStackInfo *c;
jint ret;
int nr_total = 0;
int i, lines_total = 0;
if (!(tab && nr_lines))
return JVMTI_ERROR_NULL_POINTER;
/*
* Phase 1 -- get the number of lines necessary
*/
for (hdr = compile_info; hdr != NULL; hdr = hdr->next) {
if (hdr->kind == JVMTI_CMLR_INLINE_INFO) {
rec = (jvmtiCompiledMethodLoadInlineRecord *)hdr;
nr_total += rec->numpcs;
}
}
if (nr_total == 0)
return JVMTI_ERROR_NOT_FOUND;
/*
* Phase 2 -- allocate big enough line table
*/
*tab = malloc(nr_total * sizeof(**tab));
if (!*tab)
return JVMTI_ERROR_OUT_OF_MEMORY;
for (hdr = compile_info; hdr != NULL; hdr = hdr->next) {
if (hdr->kind == JVMTI_CMLR_INLINE_INFO) {
rec = (jvmtiCompiledMethodLoadInlineRecord *)hdr;
for (i = 0; i < rec->numpcs; i++) {
c = rec->pcinfo + i;
/*
* c->methods is the stack of inlined method calls
* at c->pc. [0] is the leaf method. Caller frames
* are ignored at the moment.
*/
ret = do_get_line_number(jvmti, c->pc,
c->methods[0],
c->bcis[0],
*tab + lines_total);
if (ret == JVMTI_ERROR_NONE)
lines_total++;
}
}
}
*nr_lines = lines_total;
return JVMTI_ERROR_NONE;
}
#else /* HAVE_JVMTI_CMLR */
static jvmtiError
get_line_numbers(jvmtiEnv *jvmti __maybe_unused, const void *compile_info __maybe_unused,
jvmti_line_info_t **tab __maybe_unused, int *nr_lines __maybe_unused)
{
return JVMTI_ERROR_NONE;
}
#endif /* HAVE_JVMTI_CMLR */
static void
copy_class_filename(const char * class_sign, const char * file_name, char * result, size_t max_length)
{
/*
* Assume the path name follows the class hierarchy; this is common practice with Java programs
*/
if (*class_sign == 'L') {
int j, i = 0;
char *p = strrchr(class_sign, '/');
if (p) {
/* drop the 'L' prefix and copy up to the final '/' */
for (i = 0; i < (p - class_sign); i++)
result[i] = class_sign[i+1];
}
/*
* Append the file name. Loops are used rather than string ops to avoid
* modifying class_sign, which is used later for the symbol name.
*/
for (j = 0; i < (max_length - 1) && file_name && j < strlen(file_name); j++, i++)
result[i] = file_name[j];
result[i] = '\0';
} else {
/* fallback case */
strlcpy(result, file_name, max_length);
}
}
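/*
* Build the source file path for a method from its declaring class
* signature and source file name. The returned buffer must be freed by the
* caller.
*/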
static jvmtiError
get_source_filename(jvmtiEnv *jvmti, jmethodID methodID, char ** buffer)
{
jvmtiError ret;
jclass decl_class;
char *file_name = NULL;
char *class_sign = NULL;
char fn[PATH_MAX];
size_t len;
ret = (*jvmti)->GetMethodDeclaringClass(jvmti, methodID, &decl_class);
if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "GetMethodDeclaringClass", ret);
return ret;
}
ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "GetSourceFileName", ret);
return ret;
}
ret = (*jvmti)->GetClassSignature(jvmti, decl_class, &class_sign, NULL);
if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "GetClassSignature", ret);
goto free_file_name_error;
}
copy_class_filename(class_sign, file_name, fn, PATH_MAX);
len = strlen(fn);
*buffer = malloc((len + 1) * sizeof(char));
if (!*buffer) {
print_error(jvmti, "GetClassSignature", ret);
ret = JVMTI_ERROR_OUT_OF_MEMORY;
goto free_class_sign_error;
}
strcpy(*buffer, fn);
ret = JVMTI_ERROR_NONE;
free_class_sign_error:
(*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
free_file_name_error:
(*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
return ret;
}
static jvmtiError
fill_source_filenames(jvmtiEnv *jvmti, int nr_lines,
const jvmti_line_info_t * line_tab,
char ** file_names)
{
int index;
jvmtiError ret;
for (index = 0; index < nr_lines; ++index) {
ret = get_source_filename(jvmti, line_tab[index].methodID, &(file_names[index]));
if (ret != JVMTI_ERROR_NONE)
return ret;
}
return JVMTI_ERROR_NONE;
}
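/*
* JVMTI CompiledMethodLoad callback: gather line number and source file
* information (if available) and emit the debug info and code load records
* to the jitdump file.
*/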
static void JNICALL
compiled_method_load_cb(jvmtiEnv *jvmti,
jmethodID method,
jint code_size,
void const *code_addr,
jint map_length,
jvmtiAddrLocationMap const *map,
const void *compile_info)
{
jvmti_line_info_t *line_tab = NULL;
char ** line_file_names = NULL;
jclass decl_class;
char *class_sign = NULL;
char *func_name = NULL;
char *func_sign = NULL;
uint64_t addr = (uint64_t)(uintptr_t)code_addr;
jvmtiError ret;
int nr_lines = 0; /* in line_tab[] */
size_t len;
int output_debug_info = 0;
ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,
&decl_class);
if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "GetMethodDeclaringClass", ret);
return;
}
if (has_line_numbers && map && map_length) {
ret = get_line_numbers(jvmti, compile_info, &line_tab, &nr_lines);
if (ret != JVMTI_ERROR_NONE) {
if (ret != JVMTI_ERROR_NOT_FOUND) {
warnx("jvmti: cannot get line table for method");
}
nr_lines = 0;
} else if (nr_lines > 0) {
line_file_names = malloc(sizeof(char*) * nr_lines);
if (!line_file_names) {
warnx("jvmti: cannot allocate space for line table method names");
} else {
memset(line_file_names, 0, sizeof(char*) * nr_lines);
ret = fill_source_filenames(jvmti, nr_lines, line_tab, line_file_names);
if (ret != JVMTI_ERROR_NONE) {
warnx("jvmti: fill_source_filenames failed");
} else {
output_debug_info = 1;
}
}
}
}
ret = (*jvmti)->GetClassSignature(jvmti, decl_class,
&class_sign, NULL);
if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "GetClassSignature", ret);
goto error;
}
ret = (*jvmti)->GetMethodName(jvmti, method, &func_name,
&func_sign, NULL);
if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "GetMethodName", ret);
goto error;
}
/*
* write source line info record if we have it
*/
if (output_debug_info)
if (jvmti_write_debug_info(jvmti_agent, addr, nr_lines, line_tab, (const char * const *) line_file_names))
warnx("jvmti: write_debug_info() failed");
len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2;
{
char str[len];
snprintf(str, len, "%s%s%s", class_sign, func_name, func_sign);
if (jvmti_write_code(jvmti_agent, str, addr, code_addr, code_size))
warnx("jvmti: write_code() failed");
}
error:
(*jvmti)->Deallocate(jvmti, (unsigned char *)func_name);
(*jvmti)->Deallocate(jvmti, (unsigned char *)func_sign);
(*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
free(line_tab);
while (line_file_names && (nr_lines > 0)) {
if (line_file_names[nr_lines - 1]) {
free(line_file_names[nr_lines - 1]);
}
nr_lines -= 1;
}
free(line_file_names);
}
static void JNICALL
code_generated_cb(jvmtiEnv *jvmti,
char const *name,
void const *code_addr,
jint code_size)
{
uint64_t addr = (uint64_t)(unsigned long)code_addr;
int ret;
ret = jvmti_write_code(jvmti_agent, name, addr, code_addr, code_size);
if (ret)
warnx("jvmti: write_code() failed for code_generated");
}
JNIEXPORT jint JNICALL
Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __maybe_unused)
{
jvmtiEventCallbacks cb;
jvmtiCapabilities caps1;
jvmtiJlocationFormat format;
jvmtiEnv *jvmti = NULL;
jint ret;
jvmti_agent = jvmti_open();
if (!jvmti_agent) {
warnx("jvmti: open_agent failed");
return -1;
}
/*
* Request a JVMTI interface version 1 environment
*/
ret = (*jvm)->GetEnv(jvm, (void *)&jvmti, JVMTI_VERSION_1);
if (ret != JNI_OK) {
warnx("jvmti: jvmti version 1 not supported");
return -1;
}
/*
* acquire method_load capability, we require it
* request line numbers (optional)
*/
memset(&caps1, 0, sizeof(caps1));
caps1.can_generate_compiled_method_load_events = 1;
ret = (*jvmti)->AddCapabilities(jvmti, &caps1);
if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "AddCapabilities", ret);
return -1;
}
ret = (*jvmti)->GetJLocationFormat(jvmti, &format);
if (ret == JVMTI_ERROR_NONE && format == JVMTI_JLOCATION_JVMBCI) {
memset(&caps1, 0, sizeof(caps1));
caps1.can_get_line_numbers = 1;
caps1.can_get_source_file_name = 1;
ret = (*jvmti)->AddCapabilities(jvmti, &caps1);
if (ret == JVMTI_ERROR_NONE)
has_line_numbers = 1;
} else if (ret != JVMTI_ERROR_NONE)
print_error(jvmti, "GetJLocationFormat", ret);
memset(&cb, 0, sizeof(cb));
cb.CompiledMethodLoad = compiled_method_load_cb;
cb.DynamicCodeGenerated = code_generated_cb;
ret = (*jvmti)->SetEventCallbacks(jvmti, &cb, sizeof(cb));
if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "SetEventCallbacks", ret);
return -1;
}
ret = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE,
JVMTI_EVENT_COMPILED_METHOD_LOAD, NULL);
if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "SetEventNotificationMode(METHOD_LOAD)", ret);
return -1;
}
ret = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE,
JVMTI_EVENT_DYNAMIC_CODE_GENERATED, NULL);
if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "SetEventNotificationMode(CODE_GENERATED)", ret);
return -1;
}
return 0;
}
JNIEXPORT void JNICALL
Agent_OnUnload(JavaVM *jvm __maybe_unused)
{
int ret;
ret = jvmti_close(jvmti_agent);
if (ret)
errx(1, "Error: op_close_agent()");
}
| linux-master | tools/perf/jvmti/libjvmti.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dlfilter-test-api-v0.c: test original (v0) API for perf --dlfilter shared object
* Copyright (c) 2021, Intel Corporation.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
/*
* Copy original (v0) API instead of including current API
*/
#include <linux/perf_event.h>
#include <linux/types.h>
/* Definitions for perf_dlfilter_sample flags */
enum {
PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
PERF_DLFILTER_FLAG_CALL = 1ULL << 1,
PERF_DLFILTER_FLAG_RETURN = 1ULL << 2,
PERF_DLFILTER_FLAG_CONDITIONAL = 1ULL << 3,
PERF_DLFILTER_FLAG_SYSCALLRET = 1ULL << 4,
PERF_DLFILTER_FLAG_ASYNC = 1ULL << 5,
PERF_DLFILTER_FLAG_INTERRUPT = 1ULL << 6,
PERF_DLFILTER_FLAG_TX_ABORT = 1ULL << 7,
PERF_DLFILTER_FLAG_TRACE_BEGIN = 1ULL << 8,
PERF_DLFILTER_FLAG_TRACE_END = 1ULL << 9,
PERF_DLFILTER_FLAG_IN_TX = 1ULL << 10,
PERF_DLFILTER_FLAG_VMENTRY = 1ULL << 11,
PERF_DLFILTER_FLAG_VMEXIT = 1ULL << 12,
};
/*
* perf sample event information (as per perf script and <linux/perf_event.h>)
*/
struct perf_dlfilter_sample {
__u32 size; /* Size of this structure (for compatibility checking) */
__u16 ins_lat; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
__u16 p_stage_cyc; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
__u64 ip;
__s32 pid;
__s32 tid;
__u64 time;
__u64 addr;
__u64 id;
__u64 stream_id;
__u64 period;
__u64 weight; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
__u64 transaction; /* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
__u64 insn_cnt; /* For instructions-per-cycle (IPC) */
__u64 cyc_cnt; /* For instructions-per-cycle (IPC) */
__s32 cpu;
__u32 flags; /* Refer PERF_DLFILTER_FLAG_* above */
__u64 data_src; /* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
__u64 phys_addr; /* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
__u64 data_page_size; /* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
__u64 code_page_size; /* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
__u64 cgroup; /* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
__u8 cpumode; /* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
__u8 addr_correlates_sym; /* True => resolve_addr() can be called */
__u16 misc; /* Refer perf_event_header in <linux/perf_event.h> */
__u32 raw_size; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
const void *raw_data; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
__u64 brstack_nr; /* Number of brstack entries */
const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
__u64 raw_callchain_nr; /* Number of raw_callchain entries */
const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
const char *event;
};
/*
* Address location (as per perf script)
*/
struct perf_dlfilter_al {
__u32 size; /* Size of this structure (for compatibility checking) */
__u32 symoff;
const char *sym;
__u64 addr; /* Mapped address (from dso) */
__u64 sym_start;
__u64 sym_end;
const char *dso;
__u8 sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
__u8 is_64_bit; /* Only valid if dso is not NULL */
__u8 is_kernel_ip; /* True if in kernel space */
__u32 buildid_size;
__u8 *buildid;
/* Below members are only populated by resolve_ip() */
__u8 filtered; /* True if this sample event will be filtered out */
const char *comm;
};
struct perf_dlfilter_fns {
/* Return information about ip */
const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
/* Return information about addr (if addr_correlates_sym) */
const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
/* Return arguments from --dlarg option */
char **(*args)(void *ctx, int *dlargc);
/*
* Return information about address (al->size must be set before
* calling). Returns 0 on success, -1 otherwise.
*/
__s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
/* Return instruction bytes and length */
const __u8 *(*insn)(void *ctx, __u32 *length);
/* Return source file name and line number */
const char *(*srcline)(void *ctx, __u32 *line_number);
/* Return perf_event_attr, refer <linux/perf_event.h> */
struct perf_event_attr *(*attr)(void *ctx);
/* Read object code, return numbers of bytes read */
__s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
/* Reserved */
void *(*reserved[120])(void *);
};
struct perf_dlfilter_fns perf_dlfilter_fns;
static int verbose;
#define pr_debug(fmt, ...) do { \
if (verbose > 0) \
fprintf(stderr, fmt, ##__VA_ARGS__); \
} while (0)
static int test_fail(const char *msg)
{
pr_debug("%s\n", msg);
return -1;
}
#define CHECK(x) do { \
if (!(x)) \
return test_fail("Check '" #x "' failed\n"); \
} while (0)
struct filter_data {
__u64 ip;
__u64 addr;
int do_early;
int early_filter_cnt;
int filter_cnt;
};
static struct filter_data *filt_dat;
int start(void **data, void *ctx)
{
int dlargc;
char **dlargv;
struct filter_data *d;
static bool called;
verbose = 1;
CHECK(!filt_dat && !called);
called = true;
d = calloc(1, sizeof(*d));
if (!d)
return test_fail("Failed to allocate memory");
filt_dat = d;
*data = d;
dlargv = perf_dlfilter_fns.args(ctx, &dlargc);
CHECK(dlargc == 6);
CHECK(!strcmp(dlargv[0], "first"));
verbose = strtol(dlargv[1], NULL, 0);
d->ip = strtoull(dlargv[2], NULL, 0);
d->addr = strtoull(dlargv[3], NULL, 0);
d->do_early = strtol(dlargv[4], NULL, 0);
CHECK(!strcmp(dlargv[5], "last"));
pr_debug("%s API\n", __func__);
return 0;
}
#define CHECK_SAMPLE(x) do { \
if (sample->x != expected.x) \
return test_fail("'" #x "' not expected value\n"); \
} while (0)
static int check_sample(struct filter_data *d, const struct perf_dlfilter_sample *sample)
{
struct perf_dlfilter_sample expected = {
.ip = d->ip,
.pid = 12345,
.tid = 12346,
.time = 1234567890,
.addr = d->addr,
.id = 99,
.stream_id = 101,
.period = 543212345,
.cpu = 31,
.cpumode = PERF_RECORD_MISC_USER,
.addr_correlates_sym = 1,
.misc = PERF_RECORD_MISC_USER,
};
CHECK(sample->size >= sizeof(struct perf_dlfilter_sample));
CHECK_SAMPLE(ip);
CHECK_SAMPLE(pid);
CHECK_SAMPLE(tid);
CHECK_SAMPLE(time);
CHECK_SAMPLE(addr);
CHECK_SAMPLE(id);
CHECK_SAMPLE(stream_id);
CHECK_SAMPLE(period);
CHECK_SAMPLE(cpu);
CHECK_SAMPLE(cpumode);
CHECK_SAMPLE(addr_correlates_sym);
CHECK_SAMPLE(misc);
CHECK(!sample->raw_data);
CHECK_SAMPLE(brstack_nr);
CHECK(!sample->brstack);
CHECK_SAMPLE(raw_callchain_nr);
CHECK(!sample->raw_callchain);
#define EVENT_NAME "branches:"
CHECK(!strncmp(sample->event, EVENT_NAME, strlen(EVENT_NAME)));
return 0;
}
static int check_al(void *ctx)
{
const struct perf_dlfilter_al *al;
al = perf_dlfilter_fns.resolve_ip(ctx);
if (!al)
return test_fail("resolve_ip() failed");
CHECK(al->sym && !strcmp("foo", al->sym));
CHECK(!al->symoff);
return 0;
}
static int check_addr_al(void *ctx)
{
const struct perf_dlfilter_al *addr_al;
addr_al = perf_dlfilter_fns.resolve_addr(ctx);
if (!addr_al)
return test_fail("resolve_addr() failed");
CHECK(addr_al->sym && !strcmp("bar", addr_al->sym));
CHECK(!addr_al->symoff);
return 0;
}
static int check_address_al(void *ctx, const struct perf_dlfilter_sample *sample)
{
struct perf_dlfilter_al address_al;
const struct perf_dlfilter_al *al;
al = perf_dlfilter_fns.resolve_ip(ctx);
if (!al)
return test_fail("resolve_ip() failed");
address_al.size = sizeof(address_al);
if (perf_dlfilter_fns.resolve_address(ctx, sample->ip, &address_al))
return test_fail("resolve_address() failed");
CHECK(address_al.sym && al->sym);
CHECK(!strcmp(address_al.sym, al->sym));
CHECK(address_al.addr == al->addr);
CHECK(address_al.sym_start == al->sym_start);
CHECK(address_al.sym_end == al->sym_end);
CHECK(address_al.dso && al->dso);
CHECK(!strcmp(address_al.dso, al->dso));
return 0;
}
static int check_attr(void *ctx)
{
struct perf_event_attr *attr = perf_dlfilter_fns.attr(ctx);
CHECK(attr);
CHECK(attr->type == PERF_TYPE_HARDWARE);
CHECK(attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
return 0;
}
static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
{
struct filter_data *d = data;
CHECK(data && filt_dat == data);
if (early) {
CHECK(!d->early_filter_cnt);
d->early_filter_cnt += 1;
} else {
CHECK(!d->filter_cnt);
CHECK(d->early_filter_cnt);
CHECK(d->do_early != 2);
d->filter_cnt += 1;
}
if (check_sample(data, sample))
return -1;
if (check_attr(ctx))
return -1;
if (early && !d->do_early)
return 0;
if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
return -1;
if (early)
return d->do_early == 2;
return 1;
}
int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
pr_debug("%s API\n", __func__);
return do_checks(data, sample, ctx, true);
}
int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
pr_debug("%s API\n", __func__);
return do_checks(data, sample, ctx, false);
}
int stop(void *data, void *ctx)
{
static bool called;
pr_debug("%s API\n", __func__);
CHECK(data && filt_dat == data && !called);
called = true;
free(data);
filt_dat = NULL;
return 0;
}
const char *filter_description(const char **long_description)
{
*long_description = "Filter used by the 'dlfilter C API' perf test";
return "dlfilter to test v0 C API";
}
| linux-master | tools/perf/dlfilters/dlfilter-test-api-v0.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dlfilter-show-cycles.c: Print the number of cycles at the start of each line
* Copyright (c) 2021, Intel Corporation.
*/
#include <perf/perf_dlfilter.h>
#include <string.h>
#include <stdio.h>
#define MAX_CPU 4096
enum {
INSTR_CYC,
BRNCH_CYC,
OTHER_CYC,
MAX_ENTRY
};
static __u64 cycles[MAX_CPU][MAX_ENTRY];
static __u64 cycles_rpt[MAX_CPU][MAX_ENTRY];
#define BITS 16
#define TABLESZ (1 << BITS)
#define TABLEMAX (TABLESZ / 2)
#define MASK (TABLESZ - 1)
static struct entry {
__u32 used;
__s32 tid;
__u64 cycles[MAX_ENTRY];
__u64 cycles_rpt[MAX_ENTRY];
} table[TABLESZ];
static int tid_cnt;
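/* Map an event name to its cycle counter slot (instructions, branches or other) */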
static int event_entry(const char *event)
{
if (!event)
return OTHER_CYC;
if (!strncmp(event, "instructions", 12))
return INSTR_CYC;
if (!strncmp(event, "branches", 8))
return BRNCH_CYC;
return OTHER_CYC;
}
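/*
* Look up (or allocate) the per-thread entry in an open-addressing hash
* table keyed by tid, using linear probing.
*/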
static struct entry *find_entry(__s32 tid)
{
__u32 pos = tid & MASK;
struct entry *e;
e = &table[pos];
while (e->used) {
if (e->tid == tid)
return e;
if (++pos == TABLESZ)
pos = 0;
e = &table[pos];
}
if (tid_cnt >= TABLEMAX) {
fprintf(stderr, "Too many threads\n");
return NULL;
}
tid_cnt += 1;
e->used = 1;
e->tid = tid;
return e;
}
static void add_entry(__s32 tid, int pos, __u64 cnt)
{
struct entry *e = find_entry(tid);
if (e)
e->cycles[pos] += cnt;
}
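/* Accumulate IPC cycle counts per CPU, or per thread when the CPU is unknown */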
int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
__s32 cpu = sample->cpu;
__s32 tid = sample->tid;
int pos;
if (!sample->cyc_cnt)
return 0;
pos = event_entry(sample->event);
if (cpu >= 0 && cpu < MAX_CPU)
cycles[cpu][pos] += sample->cyc_cnt;
else if (tid != -1)
add_entry(tid, pos, sample->cyc_cnt);
return 0;
}
static void print_vals(__u64 cycles, __u64 delta)
{
if (delta)
printf("%10llu %10llu ", (unsigned long long)cycles, (unsigned long long)delta);
else
printf("%10llu %10s ", (unsigned long long)cycles, "");
}
int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
__s32 cpu = sample->cpu;
__s32 tid = sample->tid;
int pos;
pos = event_entry(sample->event);
if (cpu >= 0 && cpu < MAX_CPU) {
print_vals(cycles[cpu][pos], cycles[cpu][pos] - cycles_rpt[cpu][pos]);
cycles_rpt[cpu][pos] = cycles[cpu][pos];
return 0;
}
if (tid != -1) {
struct entry *e = find_entry(tid);
if (e) {
print_vals(e->cycles[pos], e->cycles[pos] - e->cycles_rpt[pos]);
e->cycles_rpt[pos] = e->cycles[pos];
return 0;
}
}
printf("%22s", "");
return 0;
}
const char *filter_description(const char **long_description)
{
static char *long_desc = "Cycle counts are accumulated per CPU (or "
"per thread if CPU is not recorded) from IPC information, and "
"printed together with the change since the last print, at the "
"start of each line. Separate counts are kept for branches, "
"instructions or other events.";
*long_description = long_desc;
return "Print the number of cycles at the start of each line";
}
| linux-master | tools/perf/dlfilters/dlfilter-show-cycles.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test v2 API for perf --dlfilter shared object
* Copyright (c) 2023, Intel Corporation.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
/*
* Copy v2 API instead of including current API
*/
#include <linux/perf_event.h>
#include <linux/types.h>
/*
* The following macro can be used to determine if this header defines
* perf_dlfilter_sample machine_pid and vcpu.
*/
#define PERF_DLFILTER_HAS_MACHINE_PID
/* Definitions for perf_dlfilter_sample flags */
enum {
PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
PERF_DLFILTER_FLAG_CALL = 1ULL << 1,
PERF_DLFILTER_FLAG_RETURN = 1ULL << 2,
PERF_DLFILTER_FLAG_CONDITIONAL = 1ULL << 3,
PERF_DLFILTER_FLAG_SYSCALLRET = 1ULL << 4,
PERF_DLFILTER_FLAG_ASYNC = 1ULL << 5,
PERF_DLFILTER_FLAG_INTERRUPT = 1ULL << 6,
PERF_DLFILTER_FLAG_TX_ABORT = 1ULL << 7,
PERF_DLFILTER_FLAG_TRACE_BEGIN = 1ULL << 8,
PERF_DLFILTER_FLAG_TRACE_END = 1ULL << 9,
PERF_DLFILTER_FLAG_IN_TX = 1ULL << 10,
PERF_DLFILTER_FLAG_VMENTRY = 1ULL << 11,
PERF_DLFILTER_FLAG_VMEXIT = 1ULL << 12,
};
/*
* perf sample event information (as per perf script and <linux/perf_event.h>)
*/
struct perf_dlfilter_sample {
__u32 size; /* Size of this structure (for compatibility checking) */
__u16 ins_lat; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
__u16 p_stage_cyc; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
__u64 ip;
__s32 pid;
__s32 tid;
__u64 time;
__u64 addr;
__u64 id;
__u64 stream_id;
__u64 period;
__u64 weight; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
__u64 transaction; /* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
__u64 insn_cnt; /* For instructions-per-cycle (IPC) */
__u64 cyc_cnt; /* For instructions-per-cycle (IPC) */
__s32 cpu;
__u32 flags; /* Refer PERF_DLFILTER_FLAG_* above */
__u64 data_src; /* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
__u64 phys_addr; /* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
__u64 data_page_size; /* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
__u64 code_page_size; /* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
__u64 cgroup; /* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
__u8 cpumode; /* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
__u8 addr_correlates_sym; /* True => resolve_addr() can be called */
__u16 misc; /* Refer perf_event_header in <linux/perf_event.h> */
__u32 raw_size; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
const void *raw_data; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
__u64 brstack_nr; /* Number of brstack entries */
const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
__u64 raw_callchain_nr; /* Number of raw_callchain entries */
const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
const char *event;
__s32 machine_pid;
__s32 vcpu;
};
/*
* Address location (as per perf script)
*/
struct perf_dlfilter_al {
__u32 size; /* Size of this structure (for compatibility checking) */
__u32 symoff;
const char *sym;
__u64 addr; /* Mapped address (from dso) */
__u64 sym_start;
__u64 sym_end;
const char *dso;
__u8 sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
__u8 is_64_bit; /* Only valid if dso is not NULL */
__u8 is_kernel_ip; /* True if in kernel space */
__u32 buildid_size;
__u8 *buildid;
/* Below members are only populated by resolve_ip() */
__u8 filtered; /* True if this sample event will be filtered out */
const char *comm;
void *priv; /* Private data (v2 API) */
};
struct perf_dlfilter_fns {
/* Return information about ip */
const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
/* Return information about addr (if addr_correlates_sym) */
const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
/* Return arguments from --dlarg option */
char **(*args)(void *ctx, int *dlargc);
/*
* Return information about address (al->size must be set before
* calling). Returns 0 on success, -1 otherwise. Call al_cleanup()
* when 'al' data is no longer needed.
*/
__s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
/* Return instruction bytes and length */
const __u8 *(*insn)(void *ctx, __u32 *length);
/* Return source file name and line number */
const char *(*srcline)(void *ctx, __u32 *line_number);
/* Return perf_event_attr, refer <linux/perf_event.h> */
struct perf_event_attr *(*attr)(void *ctx);
/* Read object code, return numbers of bytes read */
__s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
/*
* If present (i.e. must check al_cleanup != NULL), call after
* resolve_address() to free any associated resources. (v2 API)
*/
void (*al_cleanup)(void *ctx, struct perf_dlfilter_al *al);
/* Reserved */
void *(*reserved[119])(void *);
};
struct perf_dlfilter_fns perf_dlfilter_fns;
static int verbose;
#define pr_debug(fmt, ...) do { \
if (verbose > 0) \
fprintf(stderr, fmt, ##__VA_ARGS__); \
} while (0)
static int test_fail(const char *msg)
{
pr_debug("%s\n", msg);
return -1;
}
#define CHECK(x) do { \
if (!(x)) \
return test_fail("Check '" #x "' failed\n"); \
} while (0)
struct filter_data {
__u64 ip;
__u64 addr;
int do_early;
int early_filter_cnt;
int filter_cnt;
};
static struct filter_data *filt_dat;
int start(void **data, void *ctx)
{
int dlargc;
char **dlargv;
struct filter_data *d;
static bool called;
verbose = 1;
CHECK(!filt_dat && !called);
called = true;
d = calloc(1, sizeof(*d));
if (!d)
return test_fail("Failed to allocate memory");
filt_dat = d;
*data = d;
dlargv = perf_dlfilter_fns.args(ctx, &dlargc);
CHECK(dlargc == 6);
CHECK(!strcmp(dlargv[0], "first"));
verbose = strtol(dlargv[1], NULL, 0);
d->ip = strtoull(dlargv[2], NULL, 0);
d->addr = strtoull(dlargv[3], NULL, 0);
d->do_early = strtol(dlargv[4], NULL, 0);
CHECK(!strcmp(dlargv[5], "last"));
pr_debug("%s API\n", __func__);
return 0;
}
#define CHECK_SAMPLE(x) do { \
if (sample->x != expected.x) \
return test_fail("'" #x "' not expected value\n"); \
} while (0)
static int check_sample(struct filter_data *d, const struct perf_dlfilter_sample *sample)
{
struct perf_dlfilter_sample expected = {
.ip = d->ip,
.pid = 12345,
.tid = 12346,
.time = 1234567890,
.addr = d->addr,
.id = 99,
.stream_id = 101,
.period = 543212345,
.cpu = 31,
.cpumode = PERF_RECORD_MISC_USER,
.addr_correlates_sym = 1,
.misc = PERF_RECORD_MISC_USER,
};
CHECK(sample->size >= sizeof(struct perf_dlfilter_sample));
CHECK_SAMPLE(ip);
CHECK_SAMPLE(pid);
CHECK_SAMPLE(tid);
CHECK_SAMPLE(time);
CHECK_SAMPLE(addr);
CHECK_SAMPLE(id);
CHECK_SAMPLE(stream_id);
CHECK_SAMPLE(period);
CHECK_SAMPLE(cpu);
CHECK_SAMPLE(cpumode);
CHECK_SAMPLE(addr_correlates_sym);
CHECK_SAMPLE(misc);
CHECK(!sample->raw_data);
CHECK_SAMPLE(brstack_nr);
CHECK(!sample->brstack);
CHECK_SAMPLE(raw_callchain_nr);
CHECK(!sample->raw_callchain);
#define EVENT_NAME "branches:"
CHECK(!strncmp(sample->event, EVENT_NAME, strlen(EVENT_NAME)));
return 0;
}
static int check_al(void *ctx)
{
const struct perf_dlfilter_al *al;
al = perf_dlfilter_fns.resolve_ip(ctx);
if (!al)
return test_fail("resolve_ip() failed");
CHECK(al->sym && !strcmp("foo", al->sym));
CHECK(!al->symoff);
return 0;
}
static int check_addr_al(void *ctx)
{
const struct perf_dlfilter_al *addr_al;
addr_al = perf_dlfilter_fns.resolve_addr(ctx);
if (!addr_al)
return test_fail("resolve_addr() failed");
CHECK(addr_al->sym && !strcmp("bar", addr_al->sym));
CHECK(!addr_al->symoff);
return 0;
}
static int check_address_al(void *ctx, const struct perf_dlfilter_sample *sample)
{
struct perf_dlfilter_al address_al;
const struct perf_dlfilter_al *al;
al = perf_dlfilter_fns.resolve_ip(ctx);
if (!al)
return test_fail("resolve_ip() failed");
address_al.size = sizeof(address_al);
if (perf_dlfilter_fns.resolve_address(ctx, sample->ip, &address_al))
return test_fail("resolve_address() failed");
CHECK(address_al.sym && al->sym);
CHECK(!strcmp(address_al.sym, al->sym));
CHECK(address_al.addr == al->addr);
CHECK(address_al.sym_start == al->sym_start);
CHECK(address_al.sym_end == al->sym_end);
CHECK(address_al.dso && al->dso);
CHECK(!strcmp(address_al.dso, al->dso));
/* al_cleanup() is v2 API so may not be present */
if (perf_dlfilter_fns.al_cleanup)
perf_dlfilter_fns.al_cleanup(ctx, &address_al);
return 0;
}
static int check_attr(void *ctx)
{
struct perf_event_attr *attr = perf_dlfilter_fns.attr(ctx);
CHECK(attr);
CHECK(attr->type == PERF_TYPE_HARDWARE);
CHECK(attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
return 0;
}
static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
{
struct filter_data *d = data;
CHECK(data && filt_dat == data);
if (early) {
CHECK(!d->early_filter_cnt);
d->early_filter_cnt += 1;
} else {
CHECK(!d->filter_cnt);
CHECK(d->early_filter_cnt);
CHECK(d->do_early != 2);
d->filter_cnt += 1;
}
if (check_sample(data, sample))
return -1;
if (check_attr(ctx))
return -1;
if (early && !d->do_early)
return 0;
if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
return -1;
if (early)
return d->do_early == 2;
return 1;
}
int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
pr_debug("%s API\n", __func__);
return do_checks(data, sample, ctx, true);
}
int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
pr_debug("%s API\n", __func__);
return do_checks(data, sample, ctx, false);
}
int stop(void *data, void *ctx)
{
static bool called;
pr_debug("%s API\n", __func__);
CHECK(data && filt_dat == data && !called);
called = true;
free(data);
filt_dat = NULL;
return 0;
}
const char *filter_description(const char **long_description)
{
*long_description = "Filter used by the 'dlfilter C API' perf test";
return "dlfilter to test v2 C API";
}
| linux-master | tools/perf/dlfilters/dlfilter-test-api-v2.c |
// SPDX-License-Identifier: GPL-2.0
#include "math.h"
#include "parse-events.h"
#include "pmu.h"
#include "pmus.h"
#include "tests.h"
#include <errno.h>
#include <stdio.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "../pmu-events/pmu-events.h"
#include <perf/evlist.h>
#include "util/evlist.h"
#include "util/expr.h"
#include "util/hashmap.h"
#include "util/parse-events.h"
#include "metricgroup.h"
#include "stat.h"
struct perf_pmu_test_event {
/* used for matching against events from generated pmu-events.c */
struct pmu_event event;
/* used for matching against event aliases */
/* extra events for aliases */
const char *alias_str;
/*
* Note: For when PublicDescription does not exist in the JSON, we
* will have no long_desc in pmu_event.long_desc, but long_desc may
* be set in the alias.
*/
const char *alias_long_desc;
/* PMU which we should match against */
const char *matching_pmu;
};
struct perf_pmu_test_pmu {
struct perf_pmu pmu;
struct perf_pmu_test_event const *aliases[10];
};
static const struct perf_pmu_test_event bp_l1_btb_correct = {
.event = {
.pmu = "default_core",
.name = "bp_l1_btb_correct",
.event = "event=0x8a",
.desc = "L1 BTB Correction",
.topic = "branch",
},
.alias_str = "event=0x8a",
.alias_long_desc = "L1 BTB Correction",
};
static const struct perf_pmu_test_event bp_l2_btb_correct = {
.event = {
.pmu = "default_core",
.name = "bp_l2_btb_correct",
.event = "event=0x8b",
.desc = "L2 BTB Correction",
.topic = "branch",
},
.alias_str = "event=0x8b",
.alias_long_desc = "L2 BTB Correction",
};
static const struct perf_pmu_test_event segment_reg_loads_any = {
.event = {
.pmu = "default_core",
.name = "segment_reg_loads.any",
.event = "event=0x6,period=200000,umask=0x80",
.desc = "Number of segment register loads",
.topic = "other",
},
.alias_str = "event=0x6,period=0x30d40,umask=0x80",
.alias_long_desc = "Number of segment register loads",
};
static const struct perf_pmu_test_event dispatch_blocked_any = {
.event = {
.pmu = "default_core",
.name = "dispatch_blocked.any",
.event = "event=0x9,period=200000,umask=0x20",
.desc = "Memory cluster signals to block micro-op dispatch for any reason",
.topic = "other",
},
.alias_str = "event=0x9,period=0x30d40,umask=0x20",
.alias_long_desc = "Memory cluster signals to block micro-op dispatch for any reason",
};
static const struct perf_pmu_test_event eist_trans = {
.event = {
.pmu = "default_core",
.name = "eist_trans",
.event = "event=0x3a,period=200000,umask=0x0",
.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
.topic = "other",
},
.alias_str = "event=0x3a,period=0x30d40,umask=0",
.alias_long_desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
};
static const struct perf_pmu_test_event l3_cache_rd = {
.event = {
.pmu = "default_core",
.name = "l3_cache_rd",
.event = "event=0x40",
.desc = "L3 cache access, read",
.long_desc = "Attributable Level 3 cache access, read",
.topic = "cache",
},
.alias_str = "event=0x40",
.alias_long_desc = "Attributable Level 3 cache access, read",
};
static const struct perf_pmu_test_event *core_events[] = {
&bp_l1_btb_correct,
&bp_l2_btb_correct,
&segment_reg_loads_any,
&dispatch_blocked_any,
&eist_trans,
&l3_cache_rd,
NULL
};
static const struct perf_pmu_test_event uncore_hisi_ddrc_flux_wcmd = {
.event = {
.name = "uncore_hisi_ddrc.flux_wcmd",
.event = "event=0x2",
.desc = "DDRC write commands",
.topic = "uncore",
.long_desc = "DDRC write commands",
.pmu = "hisi_sccl,ddrc",
},
.alias_str = "event=0x2",
.alias_long_desc = "DDRC write commands",
.matching_pmu = "hisi_sccl1_ddrc2",
};
static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = {
.event = {
.name = "unc_cbo_xsnp_response.miss_eviction",
.event = "event=0x22,umask=0x81",
.desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.topic = "uncore",
.long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.pmu = "uncore_cbox",
},
.alias_str = "event=0x22,umask=0x81",
.alias_long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.matching_pmu = "uncore_cbox_0",
};
static const struct perf_pmu_test_event uncore_hyphen = {
.event = {
.name = "event-hyphen",
.event = "event=0xe0,umask=0x00",
.desc = "UNC_CBO_HYPHEN",
.topic = "uncore",
.long_desc = "UNC_CBO_HYPHEN",
.pmu = "uncore_cbox",
},
.alias_str = "event=0xe0,umask=0",
.alias_long_desc = "UNC_CBO_HYPHEN",
.matching_pmu = "uncore_cbox_0",
};
static const struct perf_pmu_test_event uncore_two_hyph = {
.event = {
.name = "event-two-hyph",
.event = "event=0xc0,umask=0x00",
.desc = "UNC_CBO_TWO_HYPH",
.topic = "uncore",
.long_desc = "UNC_CBO_TWO_HYPH",
.pmu = "uncore_cbox",
},
.alias_str = "event=0xc0,umask=0",
.alias_long_desc = "UNC_CBO_TWO_HYPH",
.matching_pmu = "uncore_cbox_0",
};
static const struct perf_pmu_test_event uncore_hisi_l3c_rd_hit_cpipe = {
.event = {
.name = "uncore_hisi_l3c.rd_hit_cpipe",
.event = "event=0x7",
.desc = "Total read hits",
.topic = "uncore",
.long_desc = "Total read hits",
.pmu = "hisi_sccl,l3c",
},
.alias_str = "event=0x7",
.alias_long_desc = "Total read hits",
.matching_pmu = "hisi_sccl3_l3c7",
};
static const struct perf_pmu_test_event uncore_imc_free_running_cache_miss = {
.event = {
.name = "uncore_imc_free_running.cache_miss",
.event = "event=0x12",
.desc = "Total cache misses",
.topic = "uncore",
.long_desc = "Total cache misses",
.pmu = "uncore_imc_free_running",
},
.alias_str = "event=0x12",
.alias_long_desc = "Total cache misses",
.matching_pmu = "uncore_imc_free_running_0",
};
static const struct perf_pmu_test_event uncore_imc_cache_hits = {
.event = {
.name = "uncore_imc.cache_hits",
.event = "event=0x34",
.desc = "Total cache hits",
.topic = "uncore",
.long_desc = "Total cache hits",
.pmu = "uncore_imc",
},
.alias_str = "event=0x34",
.alias_long_desc = "Total cache hits",
.matching_pmu = "uncore_imc_0",
};
static const struct perf_pmu_test_event *uncore_events[] = {
&uncore_hisi_ddrc_flux_wcmd,
&unc_cbo_xsnp_response_miss_eviction,
&uncore_hyphen,
&uncore_two_hyph,
&uncore_hisi_l3c_rd_hit_cpipe,
&uncore_imc_free_running_cache_miss,
&uncore_imc_cache_hits,
NULL
};
static const struct perf_pmu_test_event sys_ddr_pmu_write_cycles = {
.event = {
.name = "sys_ddr_pmu.write_cycles",
.event = "event=0x2b",
.desc = "ddr write-cycles event",
.topic = "uncore",
.pmu = "uncore_sys_ddr_pmu",
.compat = "v8",
},
.alias_str = "event=0x2b",
.alias_long_desc = "ddr write-cycles event",
.matching_pmu = "uncore_sys_ddr_pmu",
};
static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = {
.event = {
.name = "sys_ccn_pmu.read_cycles",
.event = "config=0x2c",
.desc = "ccn read-cycles event",
.topic = "uncore",
.pmu = "uncore_sys_ccn_pmu",
.compat = "0x01",
},
.alias_str = "config=0x2c",
.alias_long_desc = "ccn read-cycles event",
.matching_pmu = "uncore_sys_ccn_pmu",
};
static const struct perf_pmu_test_event *sys_events[] = {
&sys_ddr_pmu_write_cycles,
&sys_ccn_pmu_read_cycles,
NULL
};
static bool is_same(const char *reference, const char *test)
{
if (!reference && !test)
return true;
if (reference && !test)
return false;
if (!reference && test)
return false;
return !strcmp(reference, test);
}
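/* Compare each field of two pmu_event entries, logging the first mismatch */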
static int compare_pmu_events(const struct pmu_event *e1, const struct pmu_event *e2)
{
if (!is_same(e1->name, e2->name)) {
pr_debug2("testing event e1 %s: mismatched name string, %s vs %s\n",
e1->name, e1->name, e2->name);
return -1;
}
if (!is_same(e1->compat, e2->compat)) {
pr_debug2("testing event e1 %s: mismatched compat string, %s vs %s\n",
e1->name, e1->compat, e2->compat);
return -1;
}
if (!is_same(e1->event, e2->event)) {
pr_debug2("testing event e1 %s: mismatched event, %s vs %s\n",
e1->name, e1->event, e2->event);
return -1;
}
if (!is_same(e1->desc, e2->desc)) {
pr_debug2("testing event e1 %s: mismatched desc, %s vs %s\n",
e1->name, e1->desc, e2->desc);
return -1;
}
if (!is_same(e1->topic, e2->topic)) {
pr_debug2("testing event e1 %s: mismatched topic, %s vs %s\n",
e1->name, e1->topic, e2->topic);
return -1;
}
if (!is_same(e1->long_desc, e2->long_desc)) {
pr_debug2("testing event e1 %s: mismatched long_desc, %s vs %s\n",
e1->name, e1->long_desc, e2->long_desc);
return -1;
}
if (!is_same(e1->pmu, e2->pmu)) {
pr_debug2("testing event e1 %s: mismatched pmu string, %s vs %s\n",
e1->name, e1->pmu, e2->pmu);
return -1;
}
if (!is_same(e1->unit, e2->unit)) {
pr_debug2("testing event e1 %s: mismatched unit, %s vs %s\n",
e1->name, e1->unit, e2->unit);
return -1;
}
if (e1->perpkg != e2->perpkg) {
pr_debug2("testing event e1 %s: mismatched perpkg, %d vs %d\n",
e1->name, e1->perpkg, e2->perpkg);
return -1;
}
if (e1->deprecated != e2->deprecated) {
pr_debug2("testing event e1 %s: mismatched deprecated, %d vs %d\n",
e1->name, e1->deprecated, e2->deprecated);
return -1;
}
return 0;
}
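/* Check that a generated alias matches the expected test event */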
static int compare_alias_to_test_event(struct pmu_event_info *alias,
struct perf_pmu_test_event const *test_event,
char const *pmu_name)
{
struct pmu_event const *event = &test_event->event;
/* An alias was found, ensure everything is in order */
if (!is_same(alias->name, event->name)) {
pr_debug("testing aliases PMU %s: mismatched name, %s vs %s\n",
pmu_name, alias->name, event->name);
return -1;
}
if (!is_same(alias->desc, event->desc)) {
pr_debug("testing aliases PMU %s: mismatched desc, %s vs %s\n",
pmu_name, alias->desc, event->desc);
return -1;
}
if (!is_same(alias->long_desc, test_event->alias_long_desc)) {
pr_debug("testing aliases PMU %s: mismatched long_desc, %s vs %s\n",
pmu_name, alias->long_desc,
test_event->alias_long_desc);
return -1;
}
if (!is_same(alias->topic, event->topic)) {
pr_debug("testing aliases PMU %s: mismatched topic, %s vs %s\n",
pmu_name, alias->topic, event->topic);
return -1;
}
if (!is_same(alias->str, test_event->alias_str)) {
pr_debug("testing aliases PMU %s: mismatched str, %s vs %s\n",
pmu_name, alias->str, test_event->alias_str);
return -1;
}
if (!is_same(alias->long_desc, test_event->alias_long_desc)) {
pr_debug("testing aliases PMU %s: mismatched long desc, %s vs %s\n",
pmu_name, alias->str, test_event->alias_long_desc);
return -1;
}
if (!is_same(alias->pmu_name, test_event->event.pmu) &&
!is_same(alias->pmu_name, "default_core")) {
pr_debug("testing aliases PMU %s: mismatched pmu_name, %s vs %s\n",
pmu_name, alias->pmu_name, test_event->event.pmu);
return -1;
}
return 0;
}
static int test__pmu_event_table_core_callback(const struct pmu_event *pe,
const struct pmu_events_table *table __maybe_unused,
void *data)
{
int *map_events = data;
struct perf_pmu_test_event const **test_event_table;
bool found = false;
if (strcmp(pe->pmu, "default_core"))
test_event_table = &uncore_events[0];
else
test_event_table = &core_events[0];
for (; *test_event_table; test_event_table++) {
struct perf_pmu_test_event const *test_event = *test_event_table;
struct pmu_event const *event = &test_event->event;
if (strcmp(pe->name, event->name))
continue;
found = true;
(*map_events)++;
if (compare_pmu_events(pe, event))
return -1;
pr_debug("testing event table %s: pass\n", pe->name);
}
if (!found) {
pr_err("testing event table: could not find event %s\n", pe->name);
return -1;
}
return 0;
}
static int test__pmu_event_table_sys_callback(const struct pmu_event *pe,
const struct pmu_events_table *table __maybe_unused,
void *data)
{
int *map_events = data;
struct perf_pmu_test_event const **test_event_table;
bool found = false;
test_event_table = &sys_events[0];
for (; *test_event_table; test_event_table++) {
struct perf_pmu_test_event const *test_event = *test_event_table;
struct pmu_event const *event = &test_event->event;
if (strcmp(pe->name, event->name))
continue;
found = true;
(*map_events)++;
if (compare_pmu_events(pe, event))
return TEST_FAIL;
pr_debug("testing sys event table %s: pass\n", pe->name);
}
if (!found) {
pr_debug("testing sys event table: could not find event %s\n", pe->name);
return TEST_FAIL;
}
return TEST_OK;
}
/* Verify generated events from pmu-events.c are as expected */
static int test__pmu_event_table(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
const struct pmu_events_table *sys_event_table =
find_sys_events_table("pmu_events__test_soc_sys");
const struct pmu_events_table *table = find_core_events_table("testarch", "testcpu");
int map_events = 0, expected_events, err;
/* ignore 3x sentinels */
expected_events = ARRAY_SIZE(core_events) +
ARRAY_SIZE(uncore_events) +
ARRAY_SIZE(sys_events) - 3;
if (!table || !sys_event_table)
return -1;
err = pmu_events_table__for_each_event(table, /*pmu=*/ NULL,
test__pmu_event_table_core_callback,
&map_events);
if (err)
return err;
err = pmu_events_table__for_each_event(sys_event_table, /*pmu=*/ NULL,
test__pmu_event_table_sys_callback,
&map_events);
if (err)
return err;
if (map_events != expected_events) {
pr_err("testing event table: found %d, but expected %d\n",
map_events, expected_events);
return TEST_FAIL;
}
return 0;
}
struct test_core_pmu_event_aliases_cb_args {
struct perf_pmu_test_event const *test_event;
int *count;
};
static int test_core_pmu_event_aliases_cb(void *state, struct pmu_event_info *alias)
{
struct test_core_pmu_event_aliases_cb_args *args = state;
if (compare_alias_to_test_event(alias, args->test_event, alias->pmu->name))
return -1;
(*args->count)++;
pr_debug2("testing aliases core PMU %s: matched event %s\n",
alias->pmu_name, alias->name);
return 0;
}
/* Verify aliases are as expected */
static int __test_core_pmu_event_aliases(const char *pmu_name, int *count)
{
struct perf_pmu_test_event const **test_event_table;
struct perf_pmu *pmu;
int res = 0;
const struct pmu_events_table *table = find_core_events_table("testarch", "testcpu");
if (!table)
return -1;
test_event_table = &core_events[0];
pmu = zalloc(sizeof(*pmu));
if (!pmu)
return -1;
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
INIT_LIST_HEAD(&pmu->caps);
INIT_LIST_HEAD(&pmu->list);
pmu->name = strdup(pmu_name);
pmu->is_core = true;
pmu->events_table = table;
pmu_add_cpu_aliases_table(pmu, table);
pmu->cpu_aliases_added = true;
pmu->sysfs_aliases_loaded = true;
res = pmu_events_table__find_event(table, pmu, "bp_l1_btb_correct", NULL, NULL);
if (res != 0) {
pr_debug("Missing test event in test architecture\n");
perf_pmu__delete(pmu);
return res;
}
for (; *test_event_table; test_event_table++) {
struct perf_pmu_test_event test_event = **test_event_table;
struct pmu_event const *event = &test_event.event;
struct test_core_pmu_event_aliases_cb_args args = {
.test_event = &test_event,
.count = count,
};
int err;
test_event.event.pmu = pmu_name;
err = perf_pmu__find_event(pmu, event->name, &args,
test_core_pmu_event_aliases_cb);
if (err)
res = err;
}
perf_pmu__delete(pmu);
return res;
}
static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
{
int alias_count = 0, to_match_count = 0, matched_count = 0;
struct perf_pmu_test_event const **table;
struct perf_pmu *pmu = &test_pmu->pmu;
const char *pmu_name = pmu->name;
const struct pmu_events_table *events_table;
int res = 0;
events_table = find_core_events_table("testarch", "testcpu");
if (!events_table)
return -1;
pmu->events_table = events_table;
pmu_add_cpu_aliases_table(pmu, events_table);
pmu->cpu_aliases_added = true;
pmu->sysfs_aliases_loaded = true;
pmu_add_sys_aliases(pmu);
/* Count how many aliases we generated */
alias_count = perf_pmu__num_events(pmu);
/* Count how many aliases we expect from the known table */
for (table = &test_pmu->aliases[0]; *table; table++)
to_match_count++;
if (alias_count != to_match_count) {
pr_debug("testing aliases uncore PMU %s: mismatch expected aliases (%d) vs found (%d)\n",
pmu_name, to_match_count, alias_count);
return -1;
}
for (table = &test_pmu->aliases[0]; *table; table++) {
struct perf_pmu_test_event test_event = **table;
struct pmu_event const *event = &test_event.event;
int err;
struct test_core_pmu_event_aliases_cb_args args = {
.test_event = &test_event,
.count = &matched_count,
};
err = perf_pmu__find_event(pmu, event->name, &args,
test_core_pmu_event_aliases_cb);
if (err) {
res = err;
pr_debug("testing aliases uncore PMU %s: could not match alias %s\n",
pmu_name, event->name);
return -1;
}
}
if (alias_count != matched_count) {
pr_debug("testing aliases uncore PMU %s: mismatch found aliases (%d) vs matched (%d)\n",
pmu_name, matched_count, alias_count);
res = -1;
}
return res;
}
static struct perf_pmu_test_pmu test_pmus[] = {
{
.pmu = {
.name = "hisi_sccl1_ddrc2",
.is_uncore = 1,
},
.aliases = {
&uncore_hisi_ddrc_flux_wcmd,
},
},
{
.pmu = {
.name = "uncore_cbox_0",
.is_uncore = 1,
},
.aliases = {
&unc_cbo_xsnp_response_miss_eviction,
&uncore_hyphen,
&uncore_two_hyph,
},
},
{
.pmu = {
.name = "hisi_sccl3_l3c7",
.is_uncore = 1,
},
.aliases = {
&uncore_hisi_l3c_rd_hit_cpipe,
},
},
{
.pmu = {
.name = "uncore_imc_free_running_0",
.is_uncore = 1,
},
.aliases = {
&uncore_imc_free_running_cache_miss,
},
},
{
.pmu = {
.name = "uncore_imc_0",
.is_uncore = 1,
},
.aliases = {
&uncore_imc_cache_hits,
},
},
{
.pmu = {
.name = "uncore_sys_ddr_pmu0",
.is_uncore = 1,
.id = "v8",
},
.aliases = {
&sys_ddr_pmu_write_cycles,
},
},
{
.pmu = {
.name = "uncore_sys_ccn_pmu4",
.is_uncore = 1,
.id = "0x01",
},
.aliases = {
&sys_ccn_pmu_read_cycles,
},
},
};
/* Test that aliases generated are as expected */
static int test__aliases(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
struct perf_pmu *pmu = NULL;
unsigned long i;
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
int count = 0;
if (list_empty(&pmu->format)) {
pr_debug2("skipping testing core PMU %s\n", pmu->name);
continue;
}
if (__test_core_pmu_event_aliases(pmu->name, &count)) {
pr_debug("testing core PMU %s aliases: failed\n", pmu->name);
return -1;
}
if (count == 0) {
pr_debug("testing core PMU %s aliases: no events to match\n",
pmu->name);
return -1;
}
pr_debug("testing core PMU %s aliases: pass\n", pmu->name);
}
for (i = 0; i < ARRAY_SIZE(test_pmus); i++) {
int res;
INIT_LIST_HEAD(&test_pmus[i].pmu.format);
INIT_LIST_HEAD(&test_pmus[i].pmu.aliases);
INIT_LIST_HEAD(&test_pmus[i].pmu.caps);
res = __test_uncore_pmu_event_aliases(&test_pmus[i]);
if (res)
return res;
}
return 0;
}
static bool is_number(const char *str)
{
char *end_ptr;
double v;
errno = 0;
v = strtod(str, &end_ptr);
(void)v; // We're not interested in this value, only if it is valid
return errno == 0 && end_ptr != str;
}
static int check_parse_id(const char *id, struct parse_events_error *error,
struct perf_pmu *fake_pmu)
{
struct evlist *evlist;
int ret;
char *dup, *cur;
/* Numbers are always valid. */
if (is_number(id))
return 0;
evlist = evlist__new();
if (!evlist)
return -ENOMEM;
dup = strdup(id);
if (!dup) {
evlist__delete(evlist);
return -ENOMEM;
}
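/*
 * Metric ids use '@' as a stand-in for '/' (which means division in metric
 * expressions), so convert them back before handing the id to the event
 * parser. For illustration (using one of the ids from the metrics[] table
 * below), "imx8_ddr0@read\-cycles@" becomes "imx8_ddr0/read\-cycles/".
 */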
for (cur = strchr(dup, '@') ; cur; cur = strchr(++cur, '@'))
*cur = '/';
ret = __parse_events(evlist, dup, /*pmu_filter=*/NULL, error, fake_pmu,
/*warn_if_reordered=*/true);
free(dup);
evlist__delete(evlist);
return ret;
}
static int check_parse_fake(const char *id)
{
struct parse_events_error error;
int ret;
parse_events_error__init(&error);
ret = check_parse_id(id, &error, &perf_pmu__fake);
parse_events_error__exit(&error);
return ret;
}
struct metric {
struct list_head list;
struct metric_ref metric_ref;
};
static int test__parsing_callback(const struct pmu_metric *pm,
const struct pmu_metrics_table *table,
void *data)
{
int *failures = data;
int k;
struct evlist *evlist;
struct perf_cpu_map *cpus;
struct evsel *evsel;
struct rblist metric_events = {
.nr_entries = 0,
};
int err = 0;
if (!pm->metric_expr)
return 0;
pr_debug("Found metric '%s'\n", pm->metric_name);
(*failures)++;
/*
* We need to prepare evlist for stat mode running on CPU 0
* because that's where all the stats are going to be created.
*/
evlist = evlist__new();
if (!evlist)
return -ENOMEM;
cpus = perf_cpu_map__new("0");
if (!cpus) {
evlist__delete(evlist);
return -ENOMEM;
}
perf_evlist__set_maps(&evlist->core, cpus, NULL);
err = metricgroup__parse_groups_test(evlist, table, pm->metric_name, &metric_events);
if (err) {
if (!strcmp(pm->metric_name, "M1") || !strcmp(pm->metric_name, "M2") ||
!strcmp(pm->metric_name, "M3")) {
(*failures)--;
pr_debug("Expected broken metric %s skipping\n", pm->metric_name);
err = 0;
}
goto out_err;
}
err = evlist__alloc_stats(/*config=*/NULL, evlist, /*alloc_raw=*/false);
if (err)
goto out_err;
/*
* Add all ids with a made up value. The value may trigger divide by
* zero when subtracted and so try to make them unique.
*/
k = 1;
evlist__alloc_aggr_stats(evlist, 1);
evlist__for_each_entry(evlist, evsel) {
evsel->stats->aggr->counts.val = k;
if (evsel__name_is(evsel, "duration_time"))
update_stats(&walltime_nsecs_stats, k);
k++;
}
evlist__for_each_entry(evlist, evsel) {
struct metric_event *me = metricgroup__lookup(&metric_events, evsel, false);
if (me != NULL) {
struct metric_expr *mexp;
list_for_each_entry (mexp, &me->head, nd) {
if (strcmp(mexp->metric_name, pm->metric_name))
continue;
pr_debug("Result %f\n", test_generic_metric(mexp, 0));
err = 0;
(*failures)--;
goto out_err;
}
}
}
pr_debug("Didn't find parsed metric %s", pm->metric_name);
err = 1;
out_err:
if (err)
pr_debug("Broken metric %s\n", pm->metric_name);
/* ... cleanup. */
metricgroup__rblist_exit(&metric_events);
evlist__free_stats(evlist);
perf_cpu_map__put(cpus);
evlist__delete(evlist);
return err;
}
static int test__parsing(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
int failures = 0;
pmu_for_each_core_metric(test__parsing_callback, &failures);
pmu_for_each_sys_metric(test__parsing_callback, &failures);
return failures == 0 ? TEST_OK : TEST_FAIL;
}
struct test_metric {
const char *str;
};
static struct test_metric metrics[] = {
{ "(unc_p_power_state_occupancy.cores_c0 / unc_p_clockticks) * 100." },
{ "imx8_ddr0@read\\-cycles@ * 4 * 4", },
{ "imx8_ddr0@axid\\-read\\,axi_mask\\=0xffff\\,axi_id\\=0x0000@ * 4", },
{ "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", },
{ "(imx8_ddr0@read\\-cycles@ + imx8_ddr0@write\\-cycles@)", },
};
static int metric_parse_fake(const char *metric_name, const char *str)
{
struct expr_parse_ctx *ctx;
struct hashmap_entry *cur;
double result;
int ret = -1;
size_t bkt;
int i;
pr_debug("parsing '%s': '%s'\n", metric_name, str);
ctx = expr__ctx_new();
if (!ctx) {
pr_debug("expr__ctx_new failed");
return TEST_FAIL;
}
ctx->sctx.is_test = true;
if (expr__find_ids(str, NULL, ctx) < 0) {
pr_err("expr__find_ids failed\n");
return -1;
}
/*
* Add all ids with a made up value. The value may
* trigger divide by zero when subtracted and so try to
* make them unique.
*/
i = 1;
hashmap__for_each_entry(ctx->ids, cur, bkt)
expr__add_id_val(ctx, strdup(cur->pkey), i++);
hashmap__for_each_entry(ctx->ids, cur, bkt) {
if (check_parse_fake(cur->pkey)) {
pr_err("check_parse_fake failed\n");
goto out;
}
}
ret = 0;
if (expr__parse(&result, ctx, str)) {
/*
* Parsing failed, make numbers go from large to small which can
* resolve divide by zero issues.
*/
i = 1024;
hashmap__for_each_entry(ctx->ids, cur, bkt)
expr__add_id_val(ctx, strdup(cur->pkey), i--);
if (expr__parse(&result, ctx, str)) {
pr_err("expr__parse failed for %s\n", metric_name);
/* The following have hard to avoid divide by zero. */
if (!strcmp(metric_name, "tma_clears_resteers") ||
!strcmp(metric_name, "tma_mispredicts_resteers"))
ret = 0;
else
ret = -1;
}
}
out:
expr__ctx_free(ctx);
return ret;
}
static int test__parsing_fake_callback(const struct pmu_metric *pm,
const struct pmu_metrics_table *table __maybe_unused,
void *data __maybe_unused)
{
return metric_parse_fake(pm->metric_name, pm->metric_expr);
}
/*
* Parse all the metrics for current architecture,
* or all defined cpus via the 'fake_pmu'
* in parse_events.
*/
static int test__parsing_fake(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
int err = 0;
for (size_t i = 0; i < ARRAY_SIZE(metrics); i++) {
err = metric_parse_fake("", metrics[i].str);
if (err)
return err;
}
err = pmu_for_each_core_metric(test__parsing_fake_callback, NULL);
if (err)
return err;
return pmu_for_each_sys_metric(test__parsing_fake_callback, NULL);
}
static int test__parsing_threshold_callback(const struct pmu_metric *pm,
const struct pmu_metrics_table *table __maybe_unused,
void *data __maybe_unused)
{
if (!pm->metric_threshold)
return 0;
return metric_parse_fake(pm->metric_name, pm->metric_threshold);
}
static int test__parsing_threshold(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
int err = 0;
err = pmu_for_each_core_metric(test__parsing_threshold_callback, NULL);
if (err)
return err;
return pmu_for_each_sys_metric(test__parsing_threshold_callback, NULL);
}
static struct test_case pmu_events_tests[] = {
TEST_CASE("PMU event table sanity", pmu_event_table),
TEST_CASE("PMU event map aliases", aliases),
TEST_CASE_REASON("Parsing of PMU event table metrics", parsing,
"some metrics failed"),
TEST_CASE("Parsing of PMU event table metrics with fake PMUs", parsing_fake),
TEST_CASE("Parsing of metric thresholds with fake PMUs", parsing_threshold),
{ .name = NULL, }
};
struct test_suite suite__pmu_events = {
.desc = "PMU events",
.test_cases = pmu_events_tests,
};
| linux-master | tools/perf/tests/pmu-events.c |
// SPDX-License-Identifier: GPL-2.0
#include "parse-events.h"
#include "pmu.h"
#include "tests.h"
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/zalloc.h>
/* Simulated format definitions. */
static struct test_format {
const char *name;
const char *value;
} test_formats[] = {
{ "krava01", "config:0-1,62-63\n", },
{ "krava02", "config:10-17\n", },
{ "krava03", "config:5\n", },
{ "krava11", "config1:0,2,4,6,8,20-28\n", },
{ "krava12", "config1:63\n", },
{ "krava13", "config1:45-47\n", },
{ "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", },
{ "krava22", "config2:8,18,48,58\n", },
{ "krava23", "config2:28-29,38\n", },
};
/* Simulated user input. */
static struct parse_events_term test_terms[] = {
{
.config = "krava01",
.val.num = 15,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
.config = "krava02",
.val.num = 170,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
.config = "krava03",
.val.num = 1,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
.config = "krava11",
.val.num = 27,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
.config = "krava12",
.val.num = 1,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
.config = "krava13",
.val.num = 2,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
.config = "krava21",
.val.num = 119,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
.config = "krava22",
.val.num = 11,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
.config = "krava23",
.val.num = 2,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
};
/*
* Prepare format directory data, exported by kernel
* at /sys/bus/event_source/devices/<dev>/format.
*/
static char *test_format_dir_get(char *dir, size_t sz)
{
unsigned int i;
snprintf(dir, sz, "/tmp/perf-pmu-test-format-XXXXXX");
if (!mkdtemp(dir))
return NULL;
for (i = 0; i < ARRAY_SIZE(test_formats); i++) {
char name[PATH_MAX];
struct test_format *format = &test_formats[i];
FILE *file;
scnprintf(name, PATH_MAX, "%s/%s", dir, format->name);
file = fopen(name, "w");
if (!file)
return NULL;
if (1 != fwrite(format->value, strlen(format->value), 1, file)) {
fclose(file);
break;
}
fclose(file);
}
return dir;
}
/* Cleanup format directory. */
static int test_format_dir_put(char *dir)
{
char buf[PATH_MAX + 20];
snprintf(buf, sizeof(buf), "rm -f %s/*\n", dir);
if (system(buf))
return -1;
snprintf(buf, sizeof(buf), "rmdir %s\n", dir);
return system(buf);
}
static struct list_head *test_terms_list(void)
{
static LIST_HEAD(terms);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(test_terms); i++)
list_add_tail(&test_terms[i].list, &terms);
return &terms;
}
static int test__pmu(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char dir[PATH_MAX];
char *format;
struct list_head *terms = test_terms_list();
struct perf_event_attr attr;
struct perf_pmu *pmu;
int fd;
int ret;
pmu = zalloc(sizeof(*pmu));
if (!pmu)
return -ENOMEM;
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
INIT_LIST_HEAD(&pmu->caps);
format = test_format_dir_get(dir, sizeof(dir));
if (!format) {
free(pmu);
return -EINVAL;
}
memset(&attr, 0, sizeof(attr));
fd = open(format, O_DIRECTORY);
if (fd < 0) {
ret = fd;
goto out;
}
pmu->name = strdup("perf-pmu-test");
ret = perf_pmu__format_parse(pmu, fd, /*eager_load=*/true);
if (ret)
goto out;
ret = perf_pmu__config_terms(pmu, &attr, terms, /*zero=*/false, /*err=*/NULL);
if (ret)
goto out;
ret = -EINVAL;
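/*
 * Illustrative arithmetic for the expected values below, assuming term value
 * bits are scattered into the listed format bit positions, lowest bit first:
 *   krava01 "config:0-1,62-63" = 15  -> bits 0,1,62,63 -> 0xc000000000000003
 *   krava02 "config:10-17"     = 170 -> 0xaa << 10     -> 0x000000000002a800
 *   krava03 "config:5"         = 1   -> bit 5          -> 0x0000000000000020
 * OR-ing these gives 0xc00000000002a823; config1 and config2 are built the
 * same way from the krava1x and krava2x terms.
 */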
if (attr.config != 0xc00000000002a823)
goto out;
if (attr.config1 != 0x8000400000000145)
goto out;
if (attr.config2 != 0x0400000020041d07)
goto out;
ret = 0;
out:
test_format_dir_put(format);
perf_pmu__delete(pmu);
return ret;
}
DEFINE_SUITE("Parse perf pmu format", pmu);
| linux-master | tools/perf/tests/pmu.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h" // struct perf_sample
#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/machine.h"
#include "util/thread.h"
#include "tests/hists_common.h"
#include <linux/kernel.h>
#include <linux/perf_event.h>
static struct {
u32 pid;
const char *comm;
} fake_threads[] = {
{ FAKE_PID_PERF1, "perf" },
{ FAKE_PID_PERF2, "perf" },
{ FAKE_PID_BASH, "bash" },
};
static struct {
u32 pid;
u64 start;
const char *filename;
} fake_mmap_info[] = {
{ FAKE_PID_PERF1, FAKE_MAP_PERF, "perf" },
{ FAKE_PID_PERF1, FAKE_MAP_LIBC, "libc" },
{ FAKE_PID_PERF1, FAKE_MAP_KERNEL, "[kernel]" },
{ FAKE_PID_PERF2, FAKE_MAP_PERF, "perf" },
{ FAKE_PID_PERF2, FAKE_MAP_LIBC, "libc" },
{ FAKE_PID_PERF2, FAKE_MAP_KERNEL, "[kernel]" },
{ FAKE_PID_BASH, FAKE_MAP_BASH, "bash" },
{ FAKE_PID_BASH, FAKE_MAP_LIBC, "libc" },
{ FAKE_PID_BASH, FAKE_MAP_KERNEL, "[kernel]" },
};
struct fake_sym {
u64 start;
u64 length;
const char *name;
};
static struct fake_sym perf_syms[] = {
{ FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "main" },
{ FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "run_command" },
{ FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "cmd_record" },
};
static struct fake_sym bash_syms[] = {
{ FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "main" },
{ FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "xmalloc" },
{ FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "xfree" },
};
static struct fake_sym libc_syms[] = {
{ 700, 100, "malloc" },
{ 800, 100, "free" },
{ 900, 100, "realloc" },
{ FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "malloc" },
{ FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "free" },
{ FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "realloc" },
};
static struct fake_sym kernel_syms[] = {
{ FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "schedule" },
{ FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "page_fault" },
{ FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "sys_perf_event_open" },
};
static struct {
const char *dso_name;
struct fake_sym *syms;
size_t nr_syms;
} fake_symbols[] = {
{ "perf", perf_syms, ARRAY_SIZE(perf_syms) },
{ "bash", bash_syms, ARRAY_SIZE(bash_syms) },
{ "libc", libc_syms, ARRAY_SIZE(libc_syms) },
{ "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) },
};
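/*
 * Each fake DSO exposes three symbols at FAKE_SYM_OFFSET1/2/3, each
 * FAKE_SYM_LENGTH bytes long (the constants come from tests/hists_common.h,
 * not shown here). So, for example, a synthesized sample with an ip of
 * FAKE_MAP_PERF + FAKE_SYM_OFFSET2 in a "perf" thread should resolve to the
 * "run_command" symbol in the "perf" DSO.
 */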
struct machine *setup_fake_machine(struct machines *machines)
{
struct machine *machine = machines__find(machines, HOST_KERNEL_ID);
size_t i;
if (machine == NULL) {
pr_debug("Not enough memory for machine setup\n");
return NULL;
}
for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
struct thread *thread;
thread = machine__findnew_thread(machine, fake_threads[i].pid,
fake_threads[i].pid);
if (thread == NULL)
goto out;
thread__set_comm(thread, fake_threads[i].comm, 0);
thread__put(thread);
}
for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
struct perf_sample sample = {
.cpumode = PERF_RECORD_MISC_USER,
};
union perf_event fake_mmap_event = {
.mmap = {
.pid = fake_mmap_info[i].pid,
.tid = fake_mmap_info[i].pid,
.start = fake_mmap_info[i].start,
.len = FAKE_MAP_LENGTH,
.pgoff = 0ULL,
},
};
strcpy(fake_mmap_event.mmap.filename,
fake_mmap_info[i].filename);
machine__process_mmap_event(machine, &fake_mmap_event, &sample);
}
for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) {
size_t k;
struct dso *dso;
dso = machine__findnew_dso(machine, fake_symbols[i].dso_name);
if (dso == NULL)
goto out;
/* emulate dso__load() */
dso__set_loaded(dso);
for (k = 0; k < fake_symbols[i].nr_syms; k++) {
struct symbol *sym;
struct fake_sym *fsym = &fake_symbols[i].syms[k];
sym = symbol__new(fsym->start, fsym->length,
STB_GLOBAL, STT_FUNC, fsym->name);
if (sym == NULL) {
dso__put(dso);
goto out;
}
symbols__insert(&dso->symbols, sym);
}
dso__put(dso);
}
return machine;
out:
pr_debug("Not enough memory for machine setup\n");
machine__delete_threads(machine);
return NULL;
}
void print_hists_in(struct hists *hists)
{
int i = 0;
struct rb_root_cached *root;
struct rb_node *node;
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
pr_info("----- %s --------\n", __func__);
node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
he = rb_entry(node, struct hist_entry, rb_node_in);
if (!he->filtered) {
struct dso *dso = map__dso(he->ms.map);
pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
i, thread__comm_str(he->thread),
dso->short_name,
he->ms.sym->name, he->stat.period);
}
i++;
node = rb_next(node);
}
}
void print_hists_out(struct hists *hists)
{
int i = 0;
struct rb_root_cached *root;
struct rb_node *node;
root = &hists->entries;
pr_info("----- %s --------\n", __func__);
node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
he = rb_entry(node, struct hist_entry, rb_node);
if (!he->filtered) {
struct dso *dso = map__dso(he->ms.map);
pr_info("%2d: entry: %8s:%5d [%-8s] %20s: period = %"PRIu64"/%"PRIu64"\n",
i, thread__comm_str(he->thread), thread__tid(he->thread),
dso->short_name,
he->ms.sym->name, he->stat.period,
he->stat_acc ? he->stat_acc->period : 0);
}
i++;
node = rb_next(node);
}
}
| linux-master | tools/perf/tests/hists_common.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <api/fs/tracing_path.h>
#include <linux/err.h>
#include <linux/string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include "thread_map.h"
#include "evsel.h"
#include "debug.h"
#include "tests.h"
#include "util/counts.h"
static int test__openat_syscall_event(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
int err = TEST_FAIL, fd;
struct evsel *evsel;
unsigned int nr_openat_calls = 111, i;
struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
char sbuf[STRERR_BUFSIZE];
char errbuf[BUFSIZ];
if (threads == NULL) {
pr_debug("thread_map__new\n");
return TEST_FAIL;
}
evsel = evsel__newtp("syscalls", "sys_enter_openat");
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
err = TEST_SKIP;
goto out_thread_map_delete;
}
if (evsel__open_per_thread(evsel, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
err = TEST_SKIP;
goto out_evsel_delete;
}
for (i = 0; i < nr_openat_calls; ++i) {
fd = openat(0, "/etc/passwd", O_RDONLY);
close(fd);
}
if (evsel__read_on_cpu(evsel, 0, 0) < 0) {
pr_debug("evsel__read_on_cpu\n");
goto out_close_fd;
}
if (perf_counts(evsel->counts, 0, 0)->val != nr_openat_calls) {
pr_debug("evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
nr_openat_calls, perf_counts(evsel->counts, 0, 0)->val);
goto out_close_fd;
}
err = TEST_OK;
out_close_fd:
perf_evsel__close_fd(&evsel->core);
out_evsel_delete:
evsel__delete(evsel);
out_thread_map_delete:
perf_thread_map__put(threads);
return err;
}
static struct test_case tests__openat_syscall_event[] = {
TEST_CASE_REASON("Detect openat syscall event",
openat_syscall_event,
"permissions"),
{ .name = NULL, }
};
struct test_suite suite__openat_syscall_event = {
.desc = "Detect openat syscall event",
.test_cases = tests__openat_syscall_event,
};
| linux-master | tools/perf/tests/openat-syscall.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <inttypes.h>
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#include "dso.h"
#include "map.h"
#include "symbol.h"
#include <internal/lib.h> // page_size
#include "tests.h"
#include "debug.h"
#include "machine.h"
#define UM(x) map__unmap_ip(kallsyms_map, (x))
static bool is_ignored_symbol(const char *name, char type)
{
/* Symbol names that exactly match the following are ignored. */
static const char * const ignored_symbols[] = {
/*
* Symbols which vary between passes. Passes 1 and 2 must have
* identical symbol lists. The kallsyms_* symbols below are
* only added after pass 1, they would be included in pass 2
* when --all-symbols is specified so exclude them to get a
* stable symbol list.
*/
"kallsyms_addresses",
"kallsyms_offsets",
"kallsyms_relative_base",
"kallsyms_num_syms",
"kallsyms_names",
"kallsyms_markers",
"kallsyms_token_table",
"kallsyms_token_index",
/* Exclude linker generated symbols which vary between passes */
"_SDA_BASE_", /* ppc */
"_SDA2_BASE_", /* ppc */
NULL
};
/* Symbol names that begin with the following are ignored.*/
static const char * const ignored_prefixes[] = {
"$", /* local symbols for ARM, MIPS, etc. */
".L", /* local labels, .LBB,.Ltmpxxx,.L__unnamed_xx,.LASANPC, etc. */
"__crc_", /* modversions */
"__efistub_", /* arm64 EFI stub namespace */
"__kvm_nvhe_$", /* arm64 local symbols in non-VHE KVM namespace */
"__kvm_nvhe_.L", /* arm64 local symbols in non-VHE KVM namespace */
"__AArch64ADRPThunk_", /* arm64 lld */
"__ARMV5PILongThunk_", /* arm lld */
"__ARMV7PILongThunk_",
"__ThumbV7PILongThunk_",
"__LA25Thunk_", /* mips lld */
"__microLA25Thunk_",
NULL
};
/* Symbol names that end with the following are ignored.*/
static const char * const ignored_suffixes[] = {
"_from_arm", /* arm */
"_from_thumb", /* arm */
"_veneer", /* arm */
NULL
};
/* Symbol names that contain the following are ignored.*/
static const char * const ignored_matches[] = {
".long_branch.", /* ppc stub */
".plt_branch.", /* ppc stub */
NULL
};
const char * const *p;
for (p = ignored_symbols; *p; p++)
if (!strcmp(name, *p))
return true;
for (p = ignored_prefixes; *p; p++)
if (!strncmp(name, *p, strlen(*p)))
return true;
for (p = ignored_suffixes; *p; p++) {
int l = strlen(name) - strlen(*p);
if (l >= 0 && !strcmp(name + l, *p))
return true;
}
for (p = ignored_matches; *p; p++) {
if (strstr(name, *p))
return true;
}
if (type == 'U' || type == 'u')
return true;
/* exclude debugging symbols */
if (type == 'N' || type == 'n')
return true;
if (toupper(type) == 'A') {
/* Keep these useful absolute symbols */
if (strcmp(name, "__kernel_syscall_via_break") &&
strcmp(name, "__kernel_syscall_via_epc") &&
strcmp(name, "__kernel_sigtramp") &&
strcmp(name, "__gp"))
return true;
}
return false;
}
static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct rb_node *nd;
struct symbol *sym;
struct map *kallsyms_map, *vmlinux_map;
struct map_rb_node *rb_node;
struct machine kallsyms, vmlinux;
struct maps *maps;
u64 mem_start, mem_end;
bool header_printed;
/*
* Step 1:
*
* Init the machines that will hold kernel, modules obtained from
* both vmlinux + .ko files and from /proc/kallsyms split by modules.
*/
machine__init(&kallsyms, "", HOST_KERNEL_ID);
machine__init(&vmlinux, "", HOST_KERNEL_ID);
maps = machine__kernel_maps(&vmlinux);
/*
* Step 2:
*
* Create the kernel maps for kallsyms and the DSO where we will then
* load /proc/kallsyms. Also create the modules maps from /proc/modules
* and find the .ko files that match them in /lib/modules/`uname -r`/.
*/
if (machine__create_kernel_maps(&kallsyms) < 0) {
pr_debug("machine__create_kernel_maps failed");
err = TEST_SKIP;
goto out;
}
/*
* Step 3:
*
* Load and split /proc/kallsyms into multiple maps, one per module.
* Do not use kcore, as this test was designed before kcore support
* and has parts that only make sense if using the non-kcore code.
* XXX: extend it to stress the kcore code as well, hint: the list
* of modules extracted from /proc/kcore, in its current form, can't
* be compared against the list of modules found in the "vmlinux"
* code, nor against the one obtained from /proc/modules by the "kallsyms" code.
*/
if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) {
pr_debug("machine__load_kallsyms failed");
err = TEST_SKIP;
goto out;
}
/*
* Step 4:
*
* kallsyms will be internally on demand sorted by name so that we can
* find the reference relocation symbol, i.e. the symbol we will use
* to see if the running kernel was relocated by checking if it has the
* same value in the vmlinux file we load.
*/
kallsyms_map = machine__kernel_map(&kallsyms);
/*
* Step 5:
*
* Now repeat step 2, this time for the vmlinux file we'll auto-locate.
*/
if (machine__create_kernel_maps(&vmlinux) < 0) {
pr_info("machine__create_kernel_maps failed");
goto out;
}
vmlinux_map = machine__kernel_map(&vmlinux);
/*
* Step 6:
*
* Locate a vmlinux file in the vmlinux path that has a buildid that
* matches the one of the running kernel.
*
* While doing that look if we find the ref reloc symbol, if we find it
* we'll have its ref_reloc_symbol.unrelocated_addr and then
* maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
* to fixup the symbols.
*/
if (machine__load_vmlinux_path(&vmlinux) <= 0) {
pr_info("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
err = TEST_SKIP;
goto out;
}
err = 0;
/*
* Step 7:
*
* Now look at the symbols in the vmlinux DSO and check if we find all of them
* in the kallsyms dso. For the ones that are in both, check its names and
* end addresses too.
*/
map__for_each_symbol(vmlinux_map, sym, nd) {
struct symbol *pair, *first_pair;
sym = rb_entry(nd, struct symbol, rb_node);
if (sym->start == sym->end)
continue;
mem_start = map__unmap_ip(vmlinux_map, sym->start);
mem_end = map__unmap_ip(vmlinux_map, sym->end);
first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL);
pair = first_pair;
if (pair && UM(pair->start) == mem_start) {
next_pair:
if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
/*
* kallsyms don't have the symbol end, so we
* set that by using the next symbol start - 1,
* in some cases we get this up to a page
* wrong, trace_kmalloc when I was developing
* this code was one such example, 2106 bytes
* off the real size. More than that and we
* _really_ have a problem.
*/
s64 skew = mem_end - UM(pair->end);
if (llabs(skew) >= page_size)
pr_debug("WARN: %#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
mem_start, sym->name, mem_end,
UM(pair->end));
/*
* Do not count this as a failure, because we
* could really find a case where it's not
* possible to get proper function end from
* kallsyms.
*/
continue;
} else {
pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL);
if (pair) {
if (UM(pair->start) == mem_start)
goto next_pair;
pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
mem_start, sym->name, pair->name);
} else {
pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
mem_start, sym->name, first_pair->name);
}
continue;
}
} else if (mem_start == map__end(kallsyms.vmlinux_map)) {
/*
* Ignore aliases to _etext, i.e. to the end of the kernel text area,
* such as __indirect_thunk_end.
*/
continue;
} else if (is_ignored_symbol(sym->name, sym->type)) {
/*
* Ignore hidden symbols, see scripts/kallsyms.c for the details
*/
continue;
} else {
pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n",
mem_start, sym->name);
}
err = -1;
}
if (verbose <= 0)
goto out;
header_printed = false;
maps__for_each_entry(maps, rb_node) {
struct map *map = rb_node->map;
struct dso *dso = map__dso(map);
/*
* If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
* the kernel will have the path for the vmlinux file being used,
* so use the short name, less descriptive but the same ("[kernel]" in
* both cases).
*/
struct map *pair = maps__find_by_name(kallsyms.kmaps, (dso->kernel ?
dso->short_name :
dso->name));
if (pair) {
map__set_priv(pair, 1);
} else {
if (!header_printed) {
pr_info("WARN: Maps only in vmlinux:\n");
header_printed = true;
}
map__fprintf(map, stderr);
}
}
header_printed = false;
maps__for_each_entry(maps, rb_node) {
struct map *pair, *map = rb_node->map;
mem_start = map__unmap_ip(vmlinux_map, map__start(map));
mem_end = map__unmap_ip(vmlinux_map, map__end(map));
pair = maps__find(kallsyms.kmaps, mem_start);
if (pair == NULL || map__priv(pair))
continue;
if (map__start(pair) == mem_start) {
struct dso *dso = map__dso(map);
if (!header_printed) {
pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
header_printed = true;
}
pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
map__start(map), map__end(map), map__pgoff(map), dso->name);
if (mem_end != map__end(pair))
pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
map__start(pair), map__end(pair), map__pgoff(pair));
pr_info(" %s\n", dso->name);
map__set_priv(pair, 1);
}
}
header_printed = false;
maps = machine__kernel_maps(&kallsyms);
maps__for_each_entry(maps, rb_node) {
struct map *map = rb_node->map;
if (!map__priv(map)) {
if (!header_printed) {
pr_info("WARN: Maps only in kallsyms:\n");
header_printed = true;
}
map__fprintf(map, stderr);
}
}
out:
machine__exit(&kallsyms);
machine__exit(&vmlinux);
return err;
}
DEFINE_SUITE("vmlinux symtab matches kallsyms", vmlinux_matches_kallsyms);
| linux-master | tools/perf/tests/vmlinux-kallsyms.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include "map_symbol.h"
#include "branch.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"
#include "util/synthetic-events.h"
#include "util/util.h"
#include "tests.h"
#define COMP(m) do { \
if (s1->m != s2->m) { \
pr_debug("Samples differ at '"#m"'\n"); \
return false; \
} \
} while (0)
#define MCOMP(m) do { \
if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) { \
pr_debug("Samples differ at '"#m"'\n"); \
return false; \
} \
} while (0)
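/*
 * For illustration, COMP(ip) expands to roughly:
 *
 *   do {
 *       if (s1->ip != s2->ip) {
 *           pr_debug("Samples differ at 'ip'\n");
 *           return false;
 *       }
 *   } while (0);
 *
 * MCOMP() is the memcmp() variant for struct-valued members.
 */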
/*
* Hardcode the expected values for branch_entry flags.
* These are based on the input value (213) specified
* in branch_stack variable.
*/
#define BS_EXPECTED_BE 0xa000d00000000000
#define BS_EXPECTED_LE 0x1aa00000000
#define FLAG(s) s->branch_stack->entries[i].flags
static bool samples_same(const struct perf_sample *s1,
const struct perf_sample *s2,
u64 type, u64 read_format, bool needs_swap)
{
size_t i;
if (type & PERF_SAMPLE_IDENTIFIER)
COMP(id);
if (type & PERF_SAMPLE_IP)
COMP(ip);
if (type & PERF_SAMPLE_TID) {
COMP(pid);
COMP(tid);
}
if (type & PERF_SAMPLE_TIME)
COMP(time);
if (type & PERF_SAMPLE_ADDR)
COMP(addr);
if (type & PERF_SAMPLE_ID)
COMP(id);
if (type & PERF_SAMPLE_STREAM_ID)
COMP(stream_id);
if (type & PERF_SAMPLE_CPU)
COMP(cpu);
if (type & PERF_SAMPLE_PERIOD)
COMP(period);
if (type & PERF_SAMPLE_READ) {
if (read_format & PERF_FORMAT_GROUP)
COMP(read.group.nr);
else
COMP(read.one.value);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
COMP(read.time_enabled);
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
COMP(read.time_running);
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
for (i = 0; i < s1->read.group.nr; i++) {
/* FIXME: check values without LOST */
if (read_format & PERF_FORMAT_LOST)
MCOMP(read.group.values[i]);
}
} else {
COMP(read.one.id);
if (read_format & PERF_FORMAT_LOST)
COMP(read.one.lost);
}
}
if (type & PERF_SAMPLE_CALLCHAIN) {
COMP(callchain->nr);
for (i = 0; i < s1->callchain->nr; i++)
COMP(callchain->ips[i]);
}
if (type & PERF_SAMPLE_RAW) {
COMP(raw_size);
if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
pr_debug("Samples differ at 'raw_data'\n");
return false;
}
}
if (type & PERF_SAMPLE_BRANCH_STACK) {
COMP(branch_stack->nr);
COMP(branch_stack->hw_idx);
for (i = 0; i < s1->branch_stack->nr; i++) {
if (needs_swap)
return ((host_is_bigendian()) ?
(FLAG(s2).value == BS_EXPECTED_BE) :
(FLAG(s2).value == BS_EXPECTED_LE));
else
MCOMP(branch_stack->entries[i]);
}
}
if (type & PERF_SAMPLE_REGS_USER) {
size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);
COMP(user_regs.mask);
COMP(user_regs.abi);
if (s1->user_regs.abi &&
(!s1->user_regs.regs || !s2->user_regs.regs ||
memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
pr_debug("Samples differ at 'user_regs'\n");
return false;
}
}
if (type & PERF_SAMPLE_STACK_USER) {
COMP(user_stack.size);
if (memcmp(s1->user_stack.data, s2->user_stack.data,
s1->user_stack.size)) {
pr_debug("Samples differ at 'user_stack'\n");
return false;
}
}
if (type & PERF_SAMPLE_WEIGHT)
COMP(weight);
if (type & PERF_SAMPLE_DATA_SRC)
COMP(data_src);
if (type & PERF_SAMPLE_TRANSACTION)
COMP(transaction);
if (type & PERF_SAMPLE_REGS_INTR) {
size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);
COMP(intr_regs.mask);
COMP(intr_regs.abi);
if (s1->intr_regs.abi &&
(!s1->intr_regs.regs || !s2->intr_regs.regs ||
memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
pr_debug("Samples differ at 'intr_regs'\n");
return false;
}
}
if (type & PERF_SAMPLE_PHYS_ADDR)
COMP(phys_addr);
if (type & PERF_SAMPLE_CGROUP)
COMP(cgroup);
if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
COMP(data_page_size);
if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
COMP(code_page_size);
if (type & PERF_SAMPLE_AUX) {
COMP(aux_sample.size);
if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
s1->aux_sample.size)) {
pr_debug("Samples differ at 'aux_sample'\n");
return false;
}
}
return true;
}
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
struct evsel evsel = {
.needs_swap = false,
.core = {
.attr = {
.sample_type = sample_type,
.read_format = read_format,
},
},
};
union perf_event *event;
union {
struct ip_callchain callchain;
u64 data[64];
} callchain = {
/* 3 ips */
.data = {3, 201, 202, 203},
};
union {
struct branch_stack branch_stack;
u64 data[64];
} branch_stack = {
/* 1 branch_entry */
.data = {1, -1ULL, 211, 212, 213},
};
u64 regs[64];
const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 };
const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
struct perf_sample sample = {
.ip = 101,
.pid = 102,
.tid = 103,
.time = 104,
.addr = 105,
.id = 106,
.stream_id = 107,
.period = 108,
.weight = 109,
.cpu = 110,
.raw_size = sizeof(raw_data),
.data_src = 111,
.transaction = 112,
.raw_data = (void *)raw_data,
.callchain = &callchain.callchain,
.no_hw_idx = false,
.branch_stack = &branch_stack.branch_stack,
.user_regs = {
.abi = PERF_SAMPLE_REGS_ABI_64,
.mask = sample_regs,
.regs = regs,
},
.user_stack = {
.size = sizeof(data),
.data = (void *)data,
},
.read = {
.time_enabled = 0x030a59d664fca7deULL,
.time_running = 0x011b6ae553eb98edULL,
},
.intr_regs = {
.abi = PERF_SAMPLE_REGS_ABI_64,
.mask = sample_regs,
.regs = regs,
},
.phys_addr = 113,
.cgroup = 114,
.data_page_size = 115,
.code_page_size = 116,
.aux_sample = {
.size = sizeof(aux_data),
.data = (void *)aux_data,
},
};
struct sample_read_value values[] = {{1, 5, 0}, {9, 3, 0}, {2, 7, 0}, {6, 4, 1},};
struct perf_sample sample_out, sample_out_endian;
size_t i, sz, bufsz;
int err, ret = -1;
if (sample_type & PERF_SAMPLE_REGS_USER)
evsel.core.attr.sample_regs_user = sample_regs;
if (sample_type & PERF_SAMPLE_REGS_INTR)
evsel.core.attr.sample_regs_intr = sample_regs;
if (sample_type & PERF_SAMPLE_BRANCH_STACK)
evsel.core.attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
for (i = 0; i < sizeof(regs); i++)
*(i + (u8 *)regs) = i & 0xfe;
if (read_format & PERF_FORMAT_GROUP) {
sample.read.group.nr = 4;
sample.read.group.values = values;
} else {
sample.read.one.value = 0x08789faeb786aa87ULL;
sample.read.one.id = 99;
sample.read.one.lost = 1;
}
sz = perf_event__sample_event_size(&sample, sample_type, read_format);
bufsz = sz + 4096; /* Add a bit for overrun checking */
event = malloc(bufsz);
if (!event) {
pr_debug("malloc failed\n");
return -1;
}
memset(event, 0xff, bufsz);
event->header.type = PERF_RECORD_SAMPLE;
event->header.misc = 0;
event->header.size = sz;
err = perf_event__synthesize_sample(event, sample_type, read_format,
&sample);
if (err) {
pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
"perf_event__synthesize_sample", sample_type, err);
goto out_free;
}
/* The data does not contain 0xff so we use that to check the size */
for (i = bufsz; i > 0; i--) {
if (*(i - 1 + (u8 *)event) != 0xff)
break;
}
if (i != sz) {
pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
i, sz);
goto out_free;
}
evsel.sample_size = __evsel__sample_size(sample_type);
err = evsel__parse_sample(&evsel, event, &sample_out);
if (err) {
pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
"evsel__parse_sample", sample_type, err);
goto out_free;
}
if (!samples_same(&sample, &sample_out, sample_type, read_format, evsel.needs_swap)) {
pr_debug("parsing failed for sample_type %#"PRIx64"\n",
sample_type);
goto out_free;
}
if (sample_type == PERF_SAMPLE_BRANCH_STACK) {
evsel.needs_swap = true;
evsel.sample_size = __evsel__sample_size(sample_type);
err = evsel__parse_sample(&evsel, event, &sample_out_endian);
if (err) {
pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
"evsel__parse_sample", sample_type, err);
goto out_free;
}
if (!samples_same(&sample, &sample_out_endian, sample_type, read_format, evsel.needs_swap)) {
pr_debug("parsing failed for sample_type %#"PRIx64"\n",
sample_type);
goto out_free;
}
}
ret = 0;
out_free:
free(event);
if (ret && read_format)
pr_debug("read_format %#"PRIx64"\n", read_format);
return ret;
}
/**
* test__sample_parsing - test sample parsing.
*
* This function implements a test that synthesizes a sample event, parses it
* and then checks that the parsed sample matches the original sample. The test
* checks sample format bits separately and together. If the test passes %0 is
* returned, otherwise %-1 is returned.
*/
static int test__sample_parsing(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 28, 29, 30, 31};
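/*
 * The rf[] values above are PERF_FORMAT_* bit combinations; every entry
 * includes PERF_FORMAT_ID (bit 2), which is forced for PERF_SAMPLE_READ.
 * For example (assuming the uapi bit values), 4 is ID alone, 15 is
 * ID|GROUP|TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING, and 31 adds
 * PERF_FORMAT_LOST on top.
 */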
u64 sample_type;
u64 sample_regs;
size_t i;
int err;
/*
* Fail the test if it has not been updated when new sample format bits
* were added. Please actually update the test rather than just change
* the condition below.
*/
if (PERF_SAMPLE_MAX > PERF_SAMPLE_WEIGHT_STRUCT << 1) {
pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
return -1;
}
/* Test each sample format bit separately */
for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
sample_type <<= 1) {
/* Test read_format variations */
if (sample_type == PERF_SAMPLE_READ) {
for (i = 0; i < ARRAY_SIZE(rf); i++) {
err = do_test(sample_type, 0, rf[i]);
if (err)
return err;
}
continue;
}
sample_regs = 0;
if (sample_type == PERF_SAMPLE_REGS_USER)
sample_regs = 0x3fff;
if (sample_type == PERF_SAMPLE_REGS_INTR)
sample_regs = 0xff0fff;
err = do_test(sample_type, sample_regs, 0);
if (err)
return err;
}
/*
* Test all sample format bits together
* Note: PERF_SAMPLE_WEIGHT and PERF_SAMPLE_WEIGHT_STRUCT cannot
* be set simultaneously.
*/
sample_type = (PERF_SAMPLE_MAX - 1) & ~PERF_SAMPLE_WEIGHT;
sample_regs = 0x3fff; /* shared by intr and user regs */
for (i = 0; i < ARRAY_SIZE(rf); i++) {
err = do_test(sample_type, sample_regs, rf[i]);
if (err)
return err;
}
return 0;
}
DEFINE_SUITE("Sample parsing", sample_parsing);
| linux-master | tools/perf/tests/sample-parsing.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test dlfilter C API. A perf.data file is synthesized and then processed
* by perf script with dlfilters named dlfilter-test-api-v*.so. Also a C file
* is compiled to provide a dso to match the synthesized perf.data file.
*/
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/perf_event.h>
#include <internal/lib.h>
#include <subcmd/exec-cmd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>
#include <libgen.h>
#include <string.h>
#include <errno.h>
#include "debug.h"
#include "tool.h"
#include "event.h"
#include "header.h"
#include "machine.h"
#include "dso.h"
#include "map.h"
#include "symbol.h"
#include "synthetic-events.h"
#include "util.h"
#include "archinsn.h"
#include "dlfilter.h"
#include "tests.h"
#include "util/sample.h"
#define MAP_START 0x400000
#define DLFILTER_TEST_NAME_MAX 128
struct test_data {
struct perf_tool tool;
struct machine *machine;
int fd;
u64 foo;
u64 bar;
u64 ip;
u64 addr;
char name[DLFILTER_TEST_NAME_MAX];
char desc[DLFILTER_TEST_NAME_MAX];
char perf[PATH_MAX];
char perf_data_file_name[PATH_MAX];
char c_file_name[PATH_MAX];
char prog_file_name[PATH_MAX];
char dlfilters[PATH_MAX];
};
static int test_result(const char *msg, int ret)
{
pr_debug("%s\n", msg);
return ret;
}
static int process(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct test_data *td = container_of(tool, struct test_data, tool);
int fd = td->fd;
if (writen(fd, event, event->header.size) != event->header.size)
return -1;
return 0;
}
#define MAXCMD 4096
#define REDIRECT_TO_DEV_NULL " >/dev/null 2>&1"
static __printf(1, 2) int system_cmd(const char *fmt, ...)
{
char cmd[MAXCMD + sizeof(REDIRECT_TO_DEV_NULL)];
int ret;
va_list args;
va_start(args, fmt);
ret = vsnprintf(cmd, MAXCMD, fmt, args);
va_end(args);
if (ret <= 0 || ret >= MAXCMD)
return -1;
if (verbose <= 0)
strcat(cmd, REDIRECT_TO_DEV_NULL);
pr_debug("Command: %s\n", cmd);
ret = system(cmd);
if (ret)
pr_debug("Failed with return value %d\n", ret);
return ret;
}
static bool have_gcc(void)
{
pr_debug("Checking for gcc\n");
return !system_cmd("gcc --version");
}
static int write_attr(struct test_data *td, u64 sample_type, u64 *id)
{
struct perf_event_attr attr = {
.size = sizeof(attr),
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
.sample_type = sample_type,
.sample_period = 1,
};
return perf_event__synthesize_attr(&td->tool, &attr, 1, id, process);
}
static int write_comm(int fd, pid_t pid, pid_t tid, const char *comm_str)
{
struct perf_record_comm comm;
ssize_t sz = sizeof(comm);
comm.header.type = PERF_RECORD_COMM;
comm.header.misc = PERF_RECORD_MISC_USER;
comm.header.size = sz;
comm.pid = pid;
comm.tid = tid;
strncpy(comm.comm, comm_str, 16);
if (writen(fd, &comm, sz) != sz) {
pr_debug("%s failed\n", __func__);
return -1;
}
return 0;
}
static int write_mmap(int fd, pid_t pid, pid_t tid, u64 start, u64 len, u64 pgoff,
const char *filename)
{
char buf[PERF_SAMPLE_MAX_SIZE];
struct perf_record_mmap *mmap = (struct perf_record_mmap *)buf;
size_t fsz = roundup(strlen(filename) + 1, 8);
ssize_t sz = sizeof(*mmap) - sizeof(mmap->filename) + fsz;
mmap->header.type = PERF_RECORD_MMAP;
mmap->header.misc = PERF_RECORD_MISC_USER;
mmap->header.size = sz;
mmap->pid = pid;
mmap->tid = tid;
mmap->start = start;
mmap->len = len;
mmap->pgoff = pgoff;
strncpy(mmap->filename, filename, sizeof(mmap->filename));
if (writen(fd, mmap, sz) != sz) {
pr_debug("%s failed\n", __func__);
return -1;
}
return 0;
}
static int write_sample(struct test_data *td, u64 sample_type, u64 id, pid_t pid, pid_t tid)
{
char buf[PERF_SAMPLE_MAX_SIZE];
union perf_event *event = (union perf_event *)buf;
struct perf_sample sample = {
.ip = td->ip,
.addr = td->addr,
.id = id,
.time = 1234567890,
.cpu = 31,
.pid = pid,
.tid = tid,
.period = 543212345,
.stream_id = 101,
};
int err;
event->header.type = PERF_RECORD_SAMPLE;
event->header.misc = PERF_RECORD_MISC_USER;
event->header.size = perf_event__sample_event_size(&sample, sample_type, 0);
err = perf_event__synthesize_sample(event, sample_type, 0, &sample);
if (err)
return test_result("perf_event__synthesize_sample() failed", TEST_FAIL);
err = process(&td->tool, event, &sample, td->machine);
if (err)
return test_result("Failed to write sample", TEST_FAIL);
return TEST_OK;
}
static void close_fd(int fd)
{
if (fd >= 0)
close(fd);
}
static const char *prog = "int bar(){};int foo(){bar();};int main(){foo();return 0;}";
static int write_prog(char *file_name)
{
int fd = creat(file_name, 0644);
ssize_t n = strlen(prog);
bool err = fd < 0 || writen(fd, prog, n) != n;
close_fd(fd);
return err ? -1 : 0;
}
static int get_dlfilters_path(const char *name, char *buf, size_t sz)
{
char perf[PATH_MAX];
char path[PATH_MAX];
char *perf_path;
char *exec_path;
perf_exe(perf, sizeof(perf));
perf_path = dirname(perf);
snprintf(path, sizeof(path), "%s/dlfilters/%s", perf_path, name);
if (access(path, R_OK)) {
exec_path = get_argv_exec_path();
if (!exec_path)
return -1;
snprintf(path, sizeof(path), "%s/dlfilters/%s", exec_path, name);
free(exec_path);
if (access(path, R_OK))
return -1;
}
strlcpy(buf, dirname(path), sz);
return 0;
}
static int check_filter_desc(struct test_data *td)
{
char *long_desc = NULL;
char *desc = NULL;
int ret;
if (get_filter_desc(td->dlfilters, td->name, &desc, &long_desc) &&
long_desc && !strcmp(long_desc, "Filter used by the 'dlfilter C API' perf test") &&
desc && !strcmp(desc, td->desc))
ret = 0;
else
ret = -1;
free(desc);
free(long_desc);
return ret;
}
static int get_ip_addr(struct test_data *td)
{
struct map *map;
struct symbol *sym;
map = dso__new_map(td->prog_file_name);
if (!map)
return -1;
sym = map__find_symbol_by_name(map, "foo");
if (sym)
td->foo = sym->start;
sym = map__find_symbol_by_name(map, "bar");
if (sym)
td->bar = sym->start;
map__put(map);
td->ip = MAP_START + td->foo;
td->addr = MAP_START + td->bar;
return td->foo && td->bar ? 0 : -1;
}
static int do_run_perf_script(struct test_data *td, int do_early)
{
return system_cmd("%s script -i %s "
"--dlfilter %s/%s "
"--dlarg first "
"--dlarg %d "
"--dlarg %" PRIu64 " "
"--dlarg %" PRIu64 " "
"--dlarg %d "
"--dlarg last",
td->perf, td->perf_data_file_name, td->dlfilters,
td->name, verbose, td->ip, td->addr, do_early);
}
static int run_perf_script(struct test_data *td)
{
int do_early;
int err;
for (do_early = 0; do_early < 3; do_early++) {
err = do_run_perf_script(td, do_early);
if (err)
return err;
}
return 0;
}
#define TEST_SAMPLE_TYPE (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_TIME | \
PERF_SAMPLE_ADDR | PERF_SAMPLE_CPU | \
PERF_SAMPLE_PERIOD | PERF_SAMPLE_STREAM_ID)
static int test__dlfilter_test(struct test_data *td)
{
u64 sample_type = TEST_SAMPLE_TYPE;
pid_t pid = 12345;
pid_t tid = 12346;
u64 id = 99;
int err;
if (get_dlfilters_path(td->name, td->dlfilters, PATH_MAX))
return test_result("dlfilters not found", TEST_SKIP);
if (check_filter_desc(td))
return test_result("Failed to get expected filter description", TEST_FAIL);
if (!have_gcc())
return test_result("gcc not found", TEST_SKIP);
pr_debug("dlfilters path: %s\n", td->dlfilters);
if (write_prog(td->c_file_name))
return test_result("Failed to write test C file", TEST_FAIL);
if (verbose > 1)
system_cmd("cat %s ; echo", td->c_file_name);
if (system_cmd("gcc -g -o %s %s", td->prog_file_name, td->c_file_name))
return TEST_FAIL;
if (verbose > 2)
system_cmd("objdump -x -dS %s", td->prog_file_name);
if (get_ip_addr(td))
return test_result("Failed to find program symbols", TEST_FAIL);
pr_debug("Creating new host machine structure\n");
td->machine = machine__new_host();
td->machine->env = &perf_env;
td->fd = creat(td->perf_data_file_name, 0644);
if (td->fd < 0)
return test_result("Failed to create test perf.data file", TEST_FAIL);
err = perf_header__write_pipe(td->fd);
if (err < 0)
return test_result("perf_header__write_pipe() failed", TEST_FAIL);
err = write_attr(td, sample_type, &id);
if (err)
return test_result("perf_event__synthesize_attr() failed", TEST_FAIL);
if (write_comm(td->fd, pid, tid, "test-prog"))
return TEST_FAIL;
if (write_mmap(td->fd, pid, tid, MAP_START, 0x10000, 0, td->prog_file_name))
return TEST_FAIL;
if (write_sample(td, sample_type, id, pid, tid) != TEST_OK)
return TEST_FAIL;
if (verbose > 1)
system_cmd("%s script -i %s -D", td->perf, td->perf_data_file_name);
err = run_perf_script(td);
if (err)
return TEST_FAIL;
return TEST_OK;
}
static void unlink_path(const char *path)
{
if (*path)
unlink(path);
}
static void test_data__free(struct test_data *td)
{
machine__delete(td->machine);
close_fd(td->fd);
if (verbose <= 2) {
unlink_path(td->c_file_name);
unlink_path(td->prog_file_name);
unlink_path(td->perf_data_file_name);
}
}
static int test__dlfilter_ver(int ver)
{
struct test_data td = {.fd = -1};
int pid = getpid();
int err;
pr_debug("\n-- Testing version %d API --\n", ver);
perf_exe(td.perf, sizeof(td.perf));
snprintf(td.name, sizeof(td.name), "dlfilter-test-api-v%d.so", ver);
snprintf(td.desc, sizeof(td.desc), "dlfilter to test v%d C API", ver);
snprintf(td.perf_data_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-perf-data", pid);
snprintf(td.c_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-prog.c", pid);
snprintf(td.prog_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-prog", pid);
err = test__dlfilter_test(&td);
test_data__free(&td);
return err;
}
static int test__dlfilter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = test__dlfilter_ver(0);
if (err)
return err;
/* No test for version 1 */
return test__dlfilter_ver(2);
}
DEFINE_SUITE("dlfilter C API", dlfilter);
| linux-master | tools/perf/tests/dlfilter-test.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <subcmd/exec-cmd.h>
#include <subcmd/parse-options.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include "builtin.h"
#include "builtin-test-list.h"
#include "color.h"
#include "debug.h"
#include "hist.h"
#include "intlist.h"
#include "string2.h"
#include "symbol.h"
#include "tests.h"
#include "util/rlimit.h"
/*
* As this is a singleton built once for the run of the process, there is
* no value in trying to free it and just let it stay around until process
* exits when it's cleaned up.
*/
static size_t files_num = 0;
static struct script_file *files = NULL;
static int files_max_width = 0;
static const char *shell_tests__dir(char *path, size_t size)
{
const char *devel_dirs[] = { "./tools/perf/tests", "./tests", };
char *exec_path;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(devel_dirs); ++i) {
struct stat st;
if (!lstat(devel_dirs[i], &st)) {
scnprintf(path, size, "%s/shell", devel_dirs[i]);
if (!lstat(devel_dirs[i], &st))
return path;
}
}
/* Then installed path. */
exec_path = get_argv_exec_path();
scnprintf(path, size, "%s/tests/shell", exec_path);
free(exec_path);
return path;
}
static const char *shell_test__description(char *description, size_t size,
const char *path, const char *name)
{
FILE *fp;
char filename[PATH_MAX];
int ch;
path__join(filename, sizeof(filename), path, name);
fp = fopen(filename, "r");
if (!fp)
return NULL;
/* Skip first line - should be #!/bin/sh Shebang */
do {
ch = fgetc(fp);
} while (ch != EOF && ch != '\n');
description = fgets(description, size, fp);
fclose(fp);
/* Assume the first char on the line is a comment marker; everything after it is the description */
return description ? strim(description + 1) : NULL;
}
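/*
 * A minimal script that this picks up would look like (hypothetical example):
 *
 *   #!/bin/sh
 *   # Check perf record leader sampling
 *   ...
 *
 * The shebang line is skipped, the first character of the second line is
 * dropped and the trimmed remainder becomes the test description.
 */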
/* Is this full file path a shell script */
static bool is_shell_script(const char *path)
{
const char *ext;
ext = strrchr(path, '.');
if (!ext)
return false;
if (!strcmp(ext, ".sh")) { /* Has .sh extension */
if (access(path, R_OK | X_OK) == 0) /* Is executable */
return true;
}
return false;
}
/* Is this file in this dir a shell script (for test purposes) */
static bool is_test_script(const char *path, const char *name)
{
char filename[PATH_MAX];
path__join(filename, sizeof(filename), path, name);
return is_shell_script(filename);
}
/* Duplicate a string and fall over and die if we run out of memory */
static char *strdup_check(const char *str)
{
char *newstr;
newstr = strdup(str);
if (!newstr) {
pr_err("Out of memory while duplicating test script string\n");
abort();
}
return newstr;
}
static void append_script(const char *dir, const char *file, const char *desc)
{
struct script_file *files_tmp;
size_t files_num_tmp;
int width;
files_num_tmp = files_num + 1;
if (files_num_tmp >= SIZE_MAX) {
pr_err("Too many script files\n");
abort();
}
/* Realloc is good enough, though we could realloc by chunks, not that
* anyone will ever measure performance here */
files_tmp = realloc(files,
(files_num_tmp + 1) * sizeof(struct script_file));
if (files_tmp == NULL) {
pr_err("Out of memory while building test list\n");
abort();
}
/* Add file to end and NULL terminate the struct array */
files = files_tmp;
files_num = files_num_tmp;
files[files_num - 1].dir = strdup_check(dir);
files[files_num - 1].file = strdup_check(file);
files[files_num - 1].desc = strdup_check(desc);
files[files_num].dir = NULL;
files[files_num].file = NULL;
files[files_num].desc = NULL;
width = strlen(desc); /* Track max width of desc */
if (width > files_max_width)
files_max_width = width;
}
static void append_scripts_in_dir(const char *path)
{
struct dirent **entlist;
struct dirent *ent;
int n_dirs, i;
char filename[PATH_MAX];
/* List files, sorted by alpha */
n_dirs = scandir(path, &entlist, NULL, alphasort);
if (n_dirs == -1)
return;
for (i = 0; i < n_dirs && (ent = entlist[i]); i++) {
if (ent->d_name[0] == '.')
continue; /* Skip hidden files */
if (is_test_script(path, ent->d_name)) { /* It's a test */
char bf[256];
const char *desc = shell_test__description
(bf, sizeof(bf), path, ent->d_name);
if (desc) /* It has a desc line - valid script */
append_script(path, ent->d_name, desc);
} else if (is_directory(path, ent)) { /* Scan the subdir */
path__join(filename, sizeof(filename),
path, ent->d_name);
append_scripts_in_dir(filename);
}
}
for (i = 0; i < n_dirs; i++) /* Clean up */
zfree(&entlist[i]);
free(entlist);
}
const struct script_file *list_script_files(void)
{
char path_dir[PATH_MAX];
const char *path;
if (files)
return files; /* Singleton - we already know our list */
path = shell_tests__dir(path_dir, sizeof(path_dir)); /* Walk dir */
append_scripts_in_dir(path);
return files;
}
int list_script_max_width(void)
{
list_script_files(); /* Ensure we have scanned all scripts */
return files_max_width;
}
| linux-master | tools/perf/tests/builtin-test-list.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Just test if we can load the python binding.
*/
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include "tests.h"
#include "util/debug.h"
static int test__python_use(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char *cmd;
int ret;
if (asprintf(&cmd, "echo \"import sys ; sys.path.insert(0, '%s'); import perf\" | %s %s",
PYTHONPATH, PYTHON, verbose > 0 ? "" : "2> /dev/null") < 0)
return -1;
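/*
 * With the build-time PYTHONPATH and PYTHON macros filled in, the command
 * ends up looking roughly like (illustrative values):
 *
 *   echo "import sys ; sys.path.insert(0, 'python'); import perf" | python3 2> /dev/null
 */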
pr_debug("python usage test: \"%s\"\n", cmd);
ret = system(cmd) ? -1 : 0;
free(cmd);
return ret;
}
DEFINE_SUITE("'import perf' in python", python_use);
| linux-master | tools/perf/tests/python-use.c |
// SPDX-License-Identifier: GPL-2.0
// pe-file.exe and pe-file.exe.debug built with;
// x86_64-w64-mingw32-gcc -o pe-file.exe pe-file.c
// -Wl,--file-alignment,4096 -Wl,--build-id
// x86_64-w64-mingw32-objcopy --only-keep-debug
// --compress-debug-sections pe-file.exe pe-file.exe.debug
// x86_64-w64-mingw32-objcopy --strip-debug
// --add-gnu-debuglink=pe-file.exe.debug pe-file.exe
int main(int argc, char const *argv[])
{
return 0;
}
| linux-master | tools/perf/tests/pe-file.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/bitmap.h>
#include <perf/cpumap.h>
#include <internal/cpumap.h>
#include "tests.h"
#include "debug.h"
#define NBITS 100
static unsigned long *get_bitmap(const char *str, int nbits)
{
struct perf_cpu_map *map = perf_cpu_map__new(str);
unsigned long *bm = NULL;
int i;
bm = bitmap_zalloc(nbits);
if (map && bm) {
for (i = 0; i < perf_cpu_map__nr(map); i++)
__set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
}
if (map)
perf_cpu_map__put(map);
return bm;
}
static int test_bitmap(const char *str)
{
unsigned long *bm = get_bitmap(str, NBITS);
char buf[100];
int ret;
bitmap_scnprintf(bm, NBITS, buf, sizeof(buf));
pr_debug("bitmap: %s\n", buf);
ret = !strcmp(buf, str);
free(bm);
return ret;
}
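/*
 * bitmap_scnprintf() above is expected to print set bits as "1,3-6,8" style
 * ranges.  A self-contained sketch of that formatting for a single 64-bit
 * word follows (format_ranges is an illustrative name, not a perf API, and
 * it assumes nbits <= 64); compiled out with #if 0.
 */
#if 0
#include <stdio.h>

static int format_ranges(unsigned long long bm, int nbits, char *buf, int len)
{
	int i = 0, n = 0;

	while (i < nbits) {
		int start;

		if (!((bm >> i) & 1ULL)) {
			i++;
			continue;
		}
		start = i;
		while (i < nbits && ((bm >> i) & 1ULL))
			i++;
		n += snprintf(buf + n, len - n, "%s%d", n ? "," : "", start);
		if (i - 1 > start)
			n += snprintf(buf + n, len - n, "-%d", i - 1);
	}
	return n;
}
#endif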
static int test__bitmap_print(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
TEST_ASSERT_VAL("failed to convert map", test_bitmap("1"));
TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,5"));
TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,3,5,7,9,11,13,15,17,19,21-40"));
TEST_ASSERT_VAL("failed to convert map", test_bitmap("2-5"));
TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,3-6,8-10,24,35-37"));
TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,3-6,8-10,24,35-37"));
TEST_ASSERT_VAL("failed to convert map", test_bitmap("1-10,12-20,22-30,32-40"));
return 0;
}
DEFINE_SUITE("Print bitmap", bitmap_print);
| linux-master | tools/perf/tests/bitmap.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <perf/cpumap.h>
#include "cpumap.h"
#include "tests.h"
#include "session.h"
#include "evlist.h"
#include "debug.h"
#include "pmus.h"
#include <linux/err.h>
#define TEMPL "/tmp/perf-test-XXXXXX"
#define DATA_SIZE 10
static int get_temp(char *path)
{
int fd;
strcpy(path, TEMPL);
fd = mkstemp(path);
if (fd < 0) {
perror("mkstemp failed");
return -1;
}
close(fd);
return 0;
}
static int session_write_header(char *path)
{
struct perf_session *session;
struct perf_data data = {
.path = path,
.mode = PERF_DATA_MODE_WRITE,
};
session = perf_session__new(&data, NULL);
TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
session->evlist = evlist__new_default();
TEST_ASSERT_VAL("can't get evlist", session->evlist);
perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
perf_header__set_feat(&session->header, HEADER_NRCPUS);
perf_header__set_feat(&session->header, HEADER_ARCH);
session->header.data_size += DATA_SIZE;
TEST_ASSERT_VAL("failed to write header",
!perf_session__write_header(session, session->evlist, data.file.fd, true));
evlist__delete(session->evlist);
perf_session__delete(session);
return 0;
}
static int check_cpu_topology(char *path, struct perf_cpu_map *map)
{
struct perf_session *session;
struct perf_data data = {
.path = path,
.mode = PERF_DATA_MODE_READ,
};
int i;
struct aggr_cpu_id id;
session = perf_session__new(&data, NULL);
TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
cpu__setup_cpunode_map();
/* On platforms with large numbers of CPUs process_cpu_topology()
* might issue an error while reading the perf.data file section
* HEADER_CPU_TOPOLOGY and the cpu_topology_map pointed to by member
* cpu is a NULL pointer.
* Example: On s390
* CPU 0 is on core_id 0 and physical_package_id 6
* CPU 1 is on core_id 1 and physical_package_id 3
*
* Core_id and physical_package_id are platform and architecture
* dependent and might have higher numbers than the CPU id.
* This actually depends on the configuration.
*
* In this case process_cpu_topology() prints error message:
* "socket_id number is too big. You may need to upgrade the
* perf tool."
*
* This is the reason why this test might be skipped. aarch64 and
* s390 always write this part of the header, even when the above
* condition is true (see do_core_id_test in header.c). So always
* run this test on those platforms.
*/
if (!session->header.env.cpu
&& strncmp(session->header.env.arch, "s390", 4)
&& strncmp(session->header.env.arch, "aarch64", 7))
return TEST_SKIP;
/*
* On the powerpc pSeries platform, not all of the topology
* information is exposed via sysfs. Due to this restriction,
* details like physical_package_id will be set to -1. Hence skip
* this test if physical_package_id returns -1 for a cpu from perf_cpu_map.
*/
if (!strncmp(session->header.env.arch, "ppc64le", 7)) {
if (cpu__get_socket_id(perf_cpu_map__cpu(map, 0)) == -1)
return TEST_SKIP;
}
TEST_ASSERT_VAL("Session header CPU map not set", session->header.env.cpu);
for (i = 0; i < session->header.env.nr_cpus_avail; i++) {
struct perf_cpu cpu = { .cpu = i };
if (!perf_cpu_map__has(map, cpu))
continue;
pr_debug("CPU %d, core %d, socket %d\n", i,
session->header.env.cpu[i].core_id,
session->header.env.cpu[i].socket_id);
}
// Test that CPU ID contains socket, die, core and CPU
for (i = 0; i < perf_cpu_map__nr(map); i++) {
id = aggr_cpu_id__cpu(perf_cpu_map__cpu(map, i), NULL);
TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match",
perf_cpu_map__cpu(map, i).cpu == id.cpu.cpu);
TEST_ASSERT_VAL("Cpu map - Core ID doesn't match",
session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core);
TEST_ASSERT_VAL("Cpu map - Socket ID doesn't match",
session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
id.socket);
TEST_ASSERT_VAL("Cpu map - Die ID doesn't match",
session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1);
TEST_ASSERT_VAL("Cpu map - Thread IDX is set", id.thread_idx == -1);
}
// Test that core ID contains socket, die and core
for (i = 0; i < perf_cpu_map__nr(map); i++) {
id = aggr_cpu_id__core(perf_cpu_map__cpu(map, i), NULL);
TEST_ASSERT_VAL("Core map - Core ID doesn't match",
session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core);
TEST_ASSERT_VAL("Core map - Socket ID doesn't match",
session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
id.socket);
TEST_ASSERT_VAL("Core map - Die ID doesn't match",
session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
TEST_ASSERT_VAL("Core map - Node ID is set", id.node == -1);
TEST_ASSERT_VAL("Core map - Thread IDX is set", id.thread_idx == -1);
}
// Test that die ID contains socket and die
for (i = 0; i < perf_cpu_map__nr(map); i++) {
id = aggr_cpu_id__die(perf_cpu_map__cpu(map, i), NULL);
TEST_ASSERT_VAL("Die map - Socket ID doesn't match",
session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
id.socket);
TEST_ASSERT_VAL("Die map - Die ID doesn't match",
session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1);
TEST_ASSERT_VAL("Die map - Core is set", id.core == -1);
TEST_ASSERT_VAL("Die map - CPU is set", id.cpu.cpu == -1);
TEST_ASSERT_VAL("Die map - Thread IDX is set", id.thread_idx == -1);
}
// Test that socket ID contains only socket
for (i = 0; i < perf_cpu_map__nr(map); i++) {
id = aggr_cpu_id__socket(perf_cpu_map__cpu(map, i), NULL);
TEST_ASSERT_VAL("Socket map - Socket ID doesn't match",
session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
id.socket);
TEST_ASSERT_VAL("Socket map - Node ID is set", id.node == -1);
TEST_ASSERT_VAL("Socket map - Die ID is set", id.die == -1);
TEST_ASSERT_VAL("Socket map - Core is set", id.core == -1);
TEST_ASSERT_VAL("Socket map - CPU is set", id.cpu.cpu == -1);
TEST_ASSERT_VAL("Socket map - Thread IDX is set", id.thread_idx == -1);
}
// Test that node ID contains only node
for (i = 0; i < perf_cpu_map__nr(map); i++) {
id = aggr_cpu_id__node(perf_cpu_map__cpu(map, i), NULL);
TEST_ASSERT_VAL("Node map - Node ID doesn't match",
cpu__get_node(perf_cpu_map__cpu(map, i)) == id.node);
TEST_ASSERT_VAL("Node map - Socket is set", id.socket == -1);
TEST_ASSERT_VAL("Node map - Die ID is set", id.die == -1);
TEST_ASSERT_VAL("Node map - Core is set", id.core == -1);
TEST_ASSERT_VAL("Node map - CPU is set", id.cpu.cpu == -1);
TEST_ASSERT_VAL("Node map - Thread IDX is set", id.thread_idx == -1);
}
perf_session__delete(session);
return 0;
}
static int test__session_topology(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char path[PATH_MAX];
struct perf_cpu_map *map;
int ret = TEST_FAIL;
TEST_ASSERT_VAL("can't get templ file", !get_temp(path));
pr_debug("templ file: %s\n", path);
if (session_write_header(path))
goto free_path;
map = perf_cpu_map__new(NULL);
if (map == NULL) {
pr_debug("failed to get system cpumap\n");
goto free_path;
}
ret = check_cpu_topology(path, map);
perf_cpu_map__put(map);
free_path:
unlink(path);
return ret;
}
DEFINE_SUITE("Session topology", session_topology);
| linux-master | tools/perf/tests/topology.c |
// SPDX-License-Identifier: GPL-2.0
#include "util/map_symbol.h"
#include "util/mem-events.h"
#include "util/symbol.h"
#include "linux/perf_event.h"
#include "util/debug.h"
#include "tests.h"
#include <string.h>
static int check(union perf_mem_data_src data_src,
const char *string)
{
char out[100];
char failure[100];
struct mem_info mi = { .data_src = data_src };
int n;
n = perf_mem__snp_scnprintf(out, sizeof out, &mi);
n += perf_mem__lvl_scnprintf(out + n, sizeof out - n, &mi);
scnprintf(failure, sizeof failure, "unexpected %s", out);
TEST_ASSERT_VAL(failure, !strcmp(string, out));
return 0;
}
static int test__mem(struct test_suite *text __maybe_unused, int subtest __maybe_unused)
{
int ret = 0;
union perf_mem_data_src src;
memset(&src, 0, sizeof(src));
src.mem_lvl = PERF_MEM_LVL_HIT;
src.mem_lvl_num = 4;
ret |= check(src, "N/AL4 hit");
src.mem_remote = 1;
ret |= check(src, "N/ARemote L4 hit");
src.mem_lvl = PERF_MEM_LVL_MISS;
src.mem_lvl_num = PERF_MEM_LVLNUM_PMEM;
src.mem_remote = 0;
ret |= check(src, "N/APMEM miss");
src.mem_remote = 1;
ret |= check(src, "N/ARemote PMEM miss");
src.mem_snoopx = PERF_MEM_SNOOPX_FWD;
src.mem_lvl_num = PERF_MEM_LVLNUM_RAM;
ret |= check(src, "FwdRemote RAM miss");
return ret;
}
DEFINE_SUITE("Test data source output", mem);
| linux-master | tools/perf/tests/mem.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "tests.h"
#include "session.h"
#include "debug.h"
#include "demangle-java.h"
static int test__demangle_java(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret = TEST_OK;
char *buf = NULL;
size_t i;
struct {
const char *mangled, *demangled;
} test_cases[] = {
{ "Ljava/lang/StringLatin1;equals([B[B)Z",
"boolean java.lang.StringLatin1.equals(byte[], byte[])" },
{ "Ljava/util/zip/ZipUtils;CENSIZ([BI)J",
"long java.util.zip.ZipUtils.CENSIZ(byte[], int)" },
{ "Ljava/util/regex/Pattern$BmpCharProperty;match(Ljava/util/regex/Matcher;ILjava/lang/CharSequence;)Z",
"boolean java.util.regex.Pattern$BmpCharProperty.match(java.util.regex.Matcher, int, java.lang.CharSequence)" },
{ "Ljava/lang/AbstractStringBuilder;appendChars(Ljava/lang/String;II)V",
"void java.lang.AbstractStringBuilder.appendChars(java.lang.String, int, int)" },
{ "Ljava/lang/Object;<init>()V",
"void java.lang.Object<init>()" },
};
for (i = 0; i < sizeof(test_cases) / sizeof(test_cases[0]); i++) {
buf = java_demangle_sym(test_cases[i].mangled, 0);
if (strcmp(buf, test_cases[i].demangled)) {
pr_debug("FAILED: %s: %s != %s\n", test_cases[i].mangled,
buf, test_cases[i].demangled);
ret = TEST_FAIL;
}
free(buf);
}
return ret;
}
DEFINE_SUITE("Demangle Java", demangle_java);
| linux-master | tools/perf/tests/demangle-java-test.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <perf/cpumap.h>
#include "debug.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "tests.h"
#include "util/mmap.h"
#include "util/sample.h"
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <perf/evlist.h>
#include <perf/mmap.h>
/*
* This test will generate random numbers of calls to some getpid syscalls,
* then establish an mmap for a group of events that are created to monitor
* the syscalls.
*
* It will receive the events, using mmap, use its PERF_SAMPLE_ID generated
* sample.id field to map back to its respective perf_evsel instance.
*
* Then it checks if the number of syscalls reported as perf events by
* the kernel corresponds to the number of syscalls made.
*/
static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = TEST_FAIL;
union perf_event *event;
struct perf_thread_map *threads;
struct perf_cpu_map *cpus;
struct evlist *evlist;
cpu_set_t cpu_set;
const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void*)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
unsigned int nr_events[nsyscalls],
expected_nr_events[nsyscalls], i, j;
struct evsel *evsels[nsyscalls], *evsel;
char sbuf[STRERR_BUFSIZE];
struct mmap *md;
threads = thread_map__new(-1, getpid(), UINT_MAX);
if (threads == NULL) {
pr_debug("thread_map__new\n");
return -1;
}
cpus = perf_cpu_map__new(NULL);
if (cpus == NULL) {
pr_debug("perf_cpu_map__new\n");
goto out_free_threads;
}
CPU_ZERO(&cpu_set);
CPU_SET(perf_cpu_map__cpu(cpus, 0).cpu, &cpu_set);
if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
pr_debug("sched_setaffinity() failed on CPU %d: %s ",
perf_cpu_map__cpu(cpus, 0).cpu,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_free_cpus;
}
evlist = evlist__new();
if (evlist == NULL) {
pr_debug("evlist__new\n");
goto out_free_cpus;
}
perf_evlist__set_maps(&evlist->core, cpus, threads);
for (i = 0; i < nsyscalls; ++i) {
char name[64];
snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
evsels[i] = evsel__newtp("syscalls", name);
if (IS_ERR(evsels[i])) {
pr_debug("evsel__new(%s)\n", name);
if (PTR_ERR(evsels[i]) == -EACCES) {
/* Permissions failure, flag the failure as a skip. */
err = TEST_SKIP;
}
goto out_delete_evlist;
}
evsels[i]->core.attr.wakeup_events = 1;
evsel__set_sample_id(evsels[i], false);
evlist__add(evlist, evsels[i]);
if (evsel__open(evsels[i], cpus, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
nr_events[i] = 0;
expected_nr_events[i] = 1 + rand() % 127;
}
if (evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
for (i = 0; i < nsyscalls; ++i)
for (j = 0; j < expected_nr_events[i]; ++j) {
syscalls[i]();
}
md = &evlist->mmap[0];
if (perf_mmap__read_init(&md->core) < 0)
goto out_init;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
struct perf_sample sample;
if (event->header.type != PERF_RECORD_SAMPLE) {
pr_debug("unexpected %s event\n",
perf_event__name(event->header.type));
goto out_delete_evlist;
}
err = evlist__parse_sample(evlist, event, &sample);
if (err) {
pr_err("Can't parse sample, err = %d\n", err);
goto out_delete_evlist;
}
err = -1;
evsel = evlist__id2evsel(evlist, sample.id);
if (evsel == NULL) {
pr_debug("event with id %" PRIu64
" doesn't map to an evsel\n", sample.id);
goto out_delete_evlist;
}
nr_events[evsel->core.idx]++;
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(&md->core);
out_init:
err = 0;
evlist__for_each_entry(evlist, evsel) {
if (nr_events[evsel->core.idx] != expected_nr_events[evsel->core.idx]) {
pr_debug("expected %d %s events, got %d\n",
expected_nr_events[evsel->core.idx],
evsel__name(evsel), nr_events[evsel->core.idx]);
err = -1;
goto out_delete_evlist;
}
}
out_delete_evlist:
evlist__delete(evlist);
out_free_cpus:
perf_cpu_map__put(cpus);
out_free_threads:
perf_thread_map__put(threads);
return err;
}
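/*
 * The core idea of the test above -- using the kernel assigned PERF_SAMPLE_ID
 * value to find which counter a sample belongs to -- boils down to a small
 * lookup table.  The sketch below is illustrative only (the id_slot naming is
 * not part of perf) and is compiled out with #if 0.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct id_slot {
	uint64_t id;		/* value the kernel reports in sample.id */
	unsigned int nr_samples;
};

/* Return the slot whose id matches, or NULL.  This is the same job
 * evlist__id2evsel() does for real evsels in the test above. */
static struct id_slot *id_slot__find(struct id_slot *slots, size_t n, uint64_t id)
{
	for (size_t i = 0; i < n; i++) {
		if (slots[i].id == id)
			return &slots[i];
	}
	return NULL;
}
#endif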
static int test_stat_user_read(int event)
{
struct perf_counts_values counts = { .val = 0 };
struct perf_thread_map *threads;
struct perf_evsel *evsel;
struct perf_event_mmap_page *pc;
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = event,
#ifdef __aarch64__
.config1 = 0x2, /* Request user access */
#endif
};
int err, i, ret = TEST_FAIL;
bool opened = false, mapped = false;
threads = perf_thread_map__new_dummy();
TEST_ASSERT_VAL("failed to create threads", threads);
perf_thread_map__set_pid(threads, 0, 0);
evsel = perf_evsel__new(&attr);
TEST_ASSERT_VAL("failed to create evsel", evsel);
err = perf_evsel__open(evsel, NULL, threads);
if (err) {
pr_err("failed to open evsel: %s\n", strerror(-err));
ret = TEST_SKIP;
goto out;
}
opened = true;
err = perf_evsel__mmap(evsel, 0);
if (err) {
pr_err("failed to mmap evsel: %s\n", strerror(-err));
goto out;
}
mapped = true;
pc = perf_evsel__mmap_base(evsel, 0, 0);
if (!pc) {
pr_err("failed to get mmapped address\n");
goto out;
}
if (!pc->cap_user_rdpmc || !pc->index) {
pr_err("userspace counter access not %s\n",
!pc->cap_user_rdpmc ? "supported" : "enabled");
ret = TEST_SKIP;
goto out;
}
if (pc->pmc_width < 32) {
pr_err("userspace counter width not set (%d)\n", pc->pmc_width);
goto out;
}
perf_evsel__read(evsel, 0, 0, &counts);
if (counts.val == 0) {
pr_err("failed to read value for evsel\n");
goto out;
}
for (i = 0; i < 5; i++) {
volatile int count = 0x10000 << i;
__u64 start, end, last = 0;
pr_debug("\tloop = %u, ", count);
perf_evsel__read(evsel, 0, 0, &counts);
start = counts.val;
while (count--) ;
perf_evsel__read(evsel, 0, 0, &counts);
end = counts.val;
if ((end - start) < last) {
pr_err("invalid counter data: end=%llu start=%llu last= %llu\n",
end, start, last);
goto out;
}
last = end - start;
pr_debug("count = %llu\n", end - start);
}
ret = TEST_OK;
out:
if (mapped)
perf_evsel__munmap(evsel);
if (opened)
perf_evsel__close(evsel);
perf_evsel__delete(evsel);
perf_thread_map__put(threads);
return ret;
}
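/*
 * On x86, perf_evsel__read() can service the reads above from user space
 * using the fields checked earlier (cap_user_rdpmc, index, pmc_width).  The
 * sketch below follows the seqlock + rdpmc pattern documented in
 * include/uapi/linux/perf_event.h; it assumes an x86 compiler providing
 * __builtin_ia32_rdpmc() and is compiled out with #if 0.
 */
#if 0
#include <linux/perf_event.h>
#include <stdint.h>

static uint64_t mmap_read_self(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;
	int64_t pmc = 0;

	do {
		seq = pc->lock;
		__asm__ __volatile__("" : : : "memory");	/* barrier() */
		idx = pc->index;
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx) {
			uint16_t width = pc->pmc_width;

			/* sign-extend the width-bit raw counter value */
			pmc = (int64_t)((uint64_t)__builtin_ia32_rdpmc(idx - 1) << (64 - width));
			pmc >>= 64 - width;
		}
		__asm__ __volatile__("" : : : "memory");
	} while (pc->lock != seq);
	return count + pmc;
}
#endif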
static int test__mmap_user_read_instr(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
}
static int test__mmap_user_read_cycles(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
}
static struct test_case tests__basic_mmap[] = {
TEST_CASE_REASON("Read samples using the mmap interface",
basic_mmap,
"permissions"),
TEST_CASE_REASON("User space counter reading of instructions",
mmap_user_read_instr,
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
(defined(__riscv) && __riscv_xlen == 64)
"permissions"
#else
"unsupported"
#endif
),
TEST_CASE_REASON("User space counter reading of cycles",
mmap_user_read_cycles,
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
(defined(__riscv) && __riscv_xlen == 64)
"permissions"
#else
"unsupported"
#endif
),
{ .name = NULL, }
};
struct test_suite suite__basic_mmap = {
.desc = "mmap interface tests",
.test_cases = tests__basic_mmap,
};
| linux-master | tools/perf/tests/mmap-basic.c |
// SPDX-License-Identifier: GPL-2.0
#include "tests.h"
#include "debug.h"
#include "evlist.h"
#include "cgroup.h"
#include "rblist.h"
#include "metricgroup.h"
#include "parse-events.h"
#include "pmu-events/pmu-events.h"
#include "pfm.h"
#include <subcmd/parse-options.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
static int test_expand_events(struct evlist *evlist,
struct rblist *metric_events)
{
int i, ret = TEST_FAIL;
int nr_events;
bool was_group_event;
int nr_members; /* for the first evsel only */
const char cgrp_str[] = "A,B,C";
const char *cgrp_name[] = { "A", "B", "C" };
int nr_cgrps = ARRAY_SIZE(cgrp_name);
char **ev_name;
struct evsel *evsel;
TEST_ASSERT_VAL("evlist is empty", !evlist__empty(evlist));
nr_events = evlist->core.nr_entries;
ev_name = calloc(nr_events, sizeof(*ev_name));
if (ev_name == NULL) {
pr_debug("memory allocation failure\n");
return TEST_FAIL;
}
i = 0;
evlist__for_each_entry(evlist, evsel) {
ev_name[i] = strdup(evsel->name);
if (ev_name[i] == NULL) {
pr_debug("memory allocation failure\n");
goto out;
}
i++;
}
/* remember grouping info */
was_group_event = evsel__is_group_event(evlist__first(evlist));
nr_members = evlist__first(evlist)->core.nr_members;
ret = evlist__expand_cgroup(evlist, cgrp_str, metric_events, false);
if (ret < 0) {
pr_debug("failed to expand events for cgroups\n");
goto out;
}
ret = TEST_FAIL;
if (evlist->core.nr_entries != nr_events * nr_cgrps) {
pr_debug("event count doesn't match\n");
goto out;
}
i = 0;
evlist__for_each_entry(evlist, evsel) {
if (!evsel__name_is(evsel, ev_name[i % nr_events])) {
pr_debug("event name doesn't match:\n");
pr_debug(" evsel[%d]: %s\n expected: %s\n",
i, evsel->name, ev_name[i % nr_events]);
goto out;
}
if (strcmp(evsel->cgrp->name, cgrp_name[i / nr_events])) {
pr_debug("cgroup name doesn't match:\n");
pr_debug(" evsel[%d]: %s\n expected: %s\n",
i, evsel->cgrp->name, cgrp_name[i / nr_events]);
goto out;
}
if ((i % nr_events) == 0) {
if (evsel__is_group_event(evsel) != was_group_event) {
pr_debug("event group doesn't match: got %s, expect %s\n",
evsel__is_group_event(evsel) ? "true" : "false",
was_group_event ? "true" : "false");
goto out;
}
if (evsel->core.nr_members != nr_members) {
pr_debug("event group member doesn't match: %d vs %d\n",
evsel->core.nr_members, nr_members);
goto out;
}
}
i++;
}
ret = TEST_OK;
out: for (i = 0; i < nr_events; i++)
free(ev_name[i]);
free(ev_name);
return ret;
}
static int expand_default_events(void)
{
int ret;
struct rblist metric_events;
struct evlist *evlist = evlist__new_default();
TEST_ASSERT_VAL("failed to get evlist", evlist);
rblist__init(&metric_events);
ret = test_expand_events(evlist, &metric_events);
evlist__delete(evlist);
return ret;
}
static int expand_group_events(void)
{
int ret;
struct evlist *evlist;
struct rblist metric_events;
struct parse_events_error err;
const char event_str[] = "{cycles,instructions}";
symbol_conf.event_group = true;
evlist = evlist__new();
TEST_ASSERT_VAL("failed to get evlist", evlist);
parse_events_error__init(&err);
ret = parse_events(evlist, event_str, &err);
if (ret < 0) {
pr_debug("failed to parse event '%s', err %d, str '%s'\n",
event_str, ret, err.str);
parse_events_error__print(&err, event_str);
goto out;
}
rblist__init(&metric_events);
ret = test_expand_events(evlist, &metric_events);
out:
parse_events_error__exit(&err);
evlist__delete(evlist);
return ret;
}
static int expand_libpfm_events(void)
{
int ret;
struct evlist *evlist;
struct rblist metric_events;
const char event_str[] = "CYCLES";
struct option opt = {
.value = &evlist,
};
symbol_conf.event_group = true;
evlist = evlist__new();
TEST_ASSERT_VAL("failed to get evlist", evlist);
ret = parse_libpfm_events_option(&opt, event_str, 0);
if (ret < 0) {
pr_debug("failed to parse libpfm event '%s', err %d\n",
event_str, ret);
goto out;
}
if (evlist__empty(evlist)) {
pr_debug("libpfm was not enabled\n");
goto out;
}
rblist__init(&metric_events);
ret = test_expand_events(evlist, &metric_events);
out:
evlist__delete(evlist);
return ret;
}
static int expand_metric_events(void)
{
int ret;
struct evlist *evlist;
struct rblist metric_events;
const char metric_str[] = "CPI";
const struct pmu_metrics_table *pme_test;
evlist = evlist__new();
TEST_ASSERT_VAL("failed to get evlist", evlist);
rblist__init(&metric_events);
pme_test = find_core_metrics_table("testarch", "testcpu");
ret = metricgroup__parse_groups_test(evlist, pme_test, metric_str, &metric_events);
if (ret < 0) {
pr_debug("failed to parse '%s' metric\n", metric_str);
goto out;
}
ret = test_expand_events(evlist, &metric_events);
out:
metricgroup__rblist_exit(&metric_events);
evlist__delete(evlist);
return ret;
}
static int test__expand_cgroup_events(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
int ret;
ret = expand_default_events();
TEST_ASSERT_EQUAL("failed to expand default events", ret, 0);
ret = expand_group_events();
TEST_ASSERT_EQUAL("failed to expand event group", ret, 0);
ret = expand_libpfm_events();
TEST_ASSERT_EQUAL("failed to expand event group", ret, 0);
ret = expand_metric_events();
TEST_ASSERT_EQUAL("failed to expand metric events", ret, 0);
return ret;
}
DEFINE_SUITE("Event expansion for cgroups", expand_cgroup_events);
| linux-master | tools/perf/tests/expand-cgroup.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <util/symbol.h>
#include <linux/filter.h>
#include "tests.h"
#include "debug.h"
#include "probe-file.h"
#include "build-id.h"
#include "util.h"
/* To test SDT event, we need libelf support to scan elf binary */
#if defined(HAVE_SDT_EVENT) && defined(HAVE_LIBELF_SUPPORT)
#include <sys/sdt.h>
static int target_function(void)
{
DTRACE_PROBE(perf, test_target);
return TEST_OK;
}
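/*
 * DTRACE_PROBE() above emits a marker with no arguments; sys/sdt.h also
 * provides DTRACE_PROBEn() variants that record arguments at the probe site.
 * The function below is purely illustrative (not part of this test) and is
 * compiled out with #if 0.
 */
#if 0
#include <sys/sdt.h>

static long sum_to(long n)
{
	long s = 0, i;

	for (i = 1; i <= n; i++) {
		s += i;
		DTRACE_PROBE2(perf, test_sum_step, i, s);	/* records i and s */
	}
	return s;
}
#endif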
/* Copied from builtin-buildid-cache.c */
static int build_id_cache__add_file(const char *filename)
{
char sbuild_id[SBUILD_ID_SIZE];
struct build_id bid;
int err;
err = filename__read_build_id(filename, &bid);
if (err < 0) {
pr_debug("Failed to read build id of %s\n", filename);
return err;
}
build_id__sprintf(&bid, sbuild_id);
err = build_id_cache__add_s(sbuild_id, filename, NULL, false, false);
if (err < 0)
pr_debug("Failed to add build id cache of %s\n", filename);
return err;
}
static char *get_self_path(void)
{
char *buf = calloc(PATH_MAX, sizeof(char));
if (buf && readlink("/proc/self/exe", buf, PATH_MAX - 1) < 0) {
pr_debug("Failed to get correct path of perf\n");
free(buf);
return NULL;
}
return buf;
}
static int search_cached_probe(const char *target,
const char *group, const char *event)
{
struct probe_cache *cache = probe_cache__new(target, NULL);
int ret = 0;
if (!cache) {
pr_debug("Failed to open probe cache of %s\n", target);
return -EINVAL;
}
if (!probe_cache__find_by_name(cache, group, event)) {
pr_debug("Failed to find %s:%s in the cache\n", group, event);
ret = -ENOENT;
}
probe_cache__delete(cache);
return ret;
}
static int test__sdt_event(struct test_suite *test __maybe_unused, int subtests __maybe_unused)
{
int ret = TEST_FAIL;
char __tempdir[] = "./test-buildid-XXXXXX";
char *tempdir = NULL, *myself = get_self_path();
if (myself == NULL || mkdtemp(__tempdir) == NULL) {
pr_debug("Failed to make a tempdir for build-id cache\n");
goto error;
}
/* Note that buildid_dir must be an absolute path */
tempdir = realpath(__tempdir, NULL);
if (tempdir == NULL)
goto error_rmdir;
/* At first, scan itself */
set_buildid_dir(tempdir);
if (build_id_cache__add_file(myself) < 0)
goto error_rmdir;
/* Open a cache and make sure the SDT is stored */
if (search_cached_probe(myself, "sdt_perf", "test_target") < 0)
goto error_rmdir;
/* TBD: probing on the SDT event and collect logs */
/* Call the target and get an event */
ret = target_function();
error_rmdir:
/* Cleanup temporary buildid dir */
rm_rf(__tempdir);
error:
free(tempdir);
free(myself);
return ret;
}
#else
static int test__sdt_event(struct test_suite *test __maybe_unused, int subtests __maybe_unused)
{
pr_debug("Skip SDT event test because SDT support is not compiled\n");
return TEST_SKIP;
}
#endif
DEFINE_SUITE("Probe SDT events", sdt_event);
| linux-master | tools/perf/tests/sdt.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include "tests.h"
#include "map.h"
#include "maps.h"
#include "dso.h"
#include "debug.h"
struct map_def {
const char *name;
u64 start;
u64 end;
};
static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps)
{
struct map_rb_node *rb_node;
unsigned int i = 0;
bool failed = false;
if (maps__nr_maps(maps) != size) {
pr_debug("Expected %d maps, got %d", size, maps__nr_maps(maps));
failed = true;
} else {
maps__for_each_entry(maps, rb_node) {
struct map *map = rb_node->map;
if (map__start(map) != merged[i].start ||
map__end(map) != merged[i].end ||
strcmp(map__dso(map)->name, merged[i].name) ||
refcount_read(map__refcnt(map)) != 1) {
failed = true;
}
i++;
}
}
if (failed) {
pr_debug("Expected:\n");
for (i = 0; i < size; i++) {
pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: 1\n",
merged[i].start, merged[i].end, merged[i].name);
}
pr_debug("Got:\n");
maps__for_each_entry(maps, rb_node) {
struct map *map = rb_node->map;
pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: %d\n",
map__start(map),
map__end(map),
map__dso(map)->name,
refcount_read(map__refcnt(map)));
}
}
return failed ? TEST_FAIL : TEST_OK;
}
static int test__maps__merge_in(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
{
unsigned int i;
struct map_def bpf_progs[] = {
{ "bpf_prog_1", 200, 300 },
{ "bpf_prog_2", 500, 600 },
{ "bpf_prog_3", 800, 900 },
};
struct map_def merged12[] = {
{ "kcore1", 100, 200 },
{ "bpf_prog_1", 200, 300 },
{ "kcore1", 300, 500 },
{ "bpf_prog_2", 500, 600 },
{ "kcore1", 600, 800 },
{ "bpf_prog_3", 800, 900 },
{ "kcore1", 900, 1000 },
};
struct map_def merged3[] = {
{ "kcore1", 100, 200 },
{ "bpf_prog_1", 200, 300 },
{ "kcore1", 300, 500 },
{ "bpf_prog_2", 500, 600 },
{ "kcore1", 600, 800 },
{ "bpf_prog_3", 800, 900 },
{ "kcore1", 900, 1000 },
{ "kcore3", 1000, 1100 },
};
struct map *map_kcore1, *map_kcore2, *map_kcore3;
int ret;
struct maps *maps = maps__new(NULL);
TEST_ASSERT_VAL("failed to create maps", maps);
for (i = 0; i < ARRAY_SIZE(bpf_progs); i++) {
struct map *map;
map = dso__new_map(bpf_progs[i].name);
TEST_ASSERT_VAL("failed to create map", map);
map__set_start(map, bpf_progs[i].start);
map__set_end(map, bpf_progs[i].end);
TEST_ASSERT_VAL("failed to insert map", maps__insert(maps, map) == 0);
map__put(map);
}
map_kcore1 = dso__new_map("kcore1");
TEST_ASSERT_VAL("failed to create map", map_kcore1);
map_kcore2 = dso__new_map("kcore2");
TEST_ASSERT_VAL("failed to create map", map_kcore2);
map_kcore3 = dso__new_map("kcore3");
TEST_ASSERT_VAL("failed to create map", map_kcore3);
/* kcore1 map overlaps over all bpf maps */
map__set_start(map_kcore1, 100);
map__set_end(map_kcore1, 1000);
/* kcore2 map hides behind bpf_prog_2 */
map__set_start(map_kcore2, 550);
map__set_end(map_kcore2, 570);
/* kcore3 map hides behind bpf_prog_3, kcore1 and adds new map */
map__set_start(map_kcore3, 880);
map__set_end(map_kcore3, 1100);
ret = maps__merge_in(maps, map_kcore1);
TEST_ASSERT_VAL("failed to merge map", !ret);
ret = check_maps(merged12, ARRAY_SIZE(merged12), maps);
TEST_ASSERT_VAL("merge check failed", !ret);
ret = maps__merge_in(maps, map_kcore2);
TEST_ASSERT_VAL("failed to merge map", !ret);
ret = check_maps(merged12, ARRAY_SIZE(merged12), maps);
TEST_ASSERT_VAL("merge check failed", !ret);
ret = maps__merge_in(maps, map_kcore3);
TEST_ASSERT_VAL("failed to merge map", !ret);
ret = check_maps(merged3, ARRAY_SIZE(merged3), maps);
TEST_ASSERT_VAL("merge check failed", !ret);
maps__zput(maps);
return TEST_OK;
}
DEFINE_SUITE("maps__merge_in", maps__merge_in);
| linux-master | tools/perf/tests/maps.c |
// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/parse-events.h"
#include "util/thread.h"
#include "tests/tests.h"
#include "tests/hists_common.h"
#include <linux/kernel.h>
struct sample {
u32 pid;
u64 ip;
struct thread *thread;
struct map *map;
struct symbol *sym;
int socket;
};
/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
/* perf [kernel] schedule() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, .socket = 0 },
/* perf [perf] main() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, .socket = 0 },
/* perf [libc] malloc() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, .socket = 0 },
/* perf [perf] main() */
{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, .socket = 0 }, /* will be merged */
/* perf [perf] cmd_record() */
{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, .socket = 1 },
/* perf [kernel] page_fault() */
{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 1 },
/* bash [bash] main() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, .socket = 2 },
/* bash [bash] xmalloc() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, .socket = 2 },
/* bash [libc] malloc() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, .socket = 3 },
/* bash [kernel] page_fault() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 3 },
};
static int add_hist_entries(struct evlist *evlist,
struct machine *machine)
{
struct evsel *evsel;
struct addr_location al;
struct perf_sample sample = { .period = 100, };
size_t i;
addr_location__init(&al);
/*
* each evsel will have 10 samples but the 4th sample
* (perf [perf] main) will be collapsed into an existing entry,
* so a total of 9 entries will be in the tree.
*/
evlist__for_each_entry(evlist, evsel) {
for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = &sample,
.ops = &hist_iter_normal,
.hide_unresolved = false,
};
struct hists *hists = evsel__hists(evsel);
/* make sure it has no filter at first */
hists->thread_filter = NULL;
hists->dso_filter = NULL;
hists->symbol_filter_str = NULL;
sample.cpumode = PERF_RECORD_MISC_USER;
sample.pid = fake_samples[i].pid;
sample.tid = fake_samples[i].pid;
sample.ip = fake_samples[i].ip;
if (machine__resolve(machine, &al, &sample) < 0)
goto out;
al.socket = fake_samples[i].socket;
if (hist_entry_iter__add(&iter, &al,
sysctl_perf_event_max_stack, NULL) < 0) {
goto out;
}
thread__put(fake_samples[i].thread);
fake_samples[i].thread = thread__get(al.thread);
map__put(fake_samples[i].map);
fake_samples[i].map = map__get(al.map);
fake_samples[i].sym = al.sym;
}
}
addr_location__exit(&al);
return 0;
out:
pr_debug("Not enough memory for adding a hist entry\n");
addr_location__exit(&al);
return TEST_FAIL;
}
static void put_fake_samples(void)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(fake_samples); i++)
map__put(fake_samples[i].map);
}
static int test__hists_filter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct machines machines;
struct machine *machine;
struct evsel *evsel;
struct evlist *evlist = evlist__new();
TEST_ASSERT_VAL("No memory", evlist);
err = parse_event(evlist, "cpu-clock");
if (err)
goto out;
err = parse_event(evlist, "task-clock");
if (err)
goto out;
err = TEST_FAIL;
/* default sort order (comm,dso,sym) will be used */
if (setup_sorting(NULL) < 0)
goto out;
machines__init(&machines);
/* setup threads/dso/map/symbols also */
machine = setup_fake_machine(&machines);
if (!machine)
goto out;
if (verbose > 1)
machine__fprintf(machine, stderr);
/* process sample events */
err = add_hist_entries(evlist, machine);
if (err < 0)
goto out;
evlist__for_each_entry(evlist, evsel) {
struct hists *hists = evsel__hists(evsel);
hists__collapse_resort(hists, NULL);
evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("Normal histogram\n");
print_hists_out(hists);
}
TEST_ASSERT_VAL("Invalid nr samples",
hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
hists->stats.total_period == 1000);
TEST_ASSERT_VAL("Unmatched nr samples",
hists->stats.nr_samples ==
hists->stats.nr_non_filtered_samples);
TEST_ASSERT_VAL("Unmatched nr hist entries",
hists->nr_entries == hists->nr_non_filtered_entries);
TEST_ASSERT_VAL("Unmatched total period",
hists->stats.total_period ==
hists->stats.total_non_filtered_period);
/* now applying thread filter for 'bash' */
hists->thread_filter = fake_samples[9].thread;
hists__filter_by_thread(hists);
if (verbose > 2) {
pr_info("Histogram for thread filter\n");
print_hists_out(hists);
}
/* normal stats should be invariant */
TEST_ASSERT_VAL("Invalid nr samples",
hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
hists->stats.total_period == 1000);
/* but filter stats are changed */
TEST_ASSERT_VAL("Unmatched nr samples for thread filter",
hists->stats.nr_non_filtered_samples == 4);
TEST_ASSERT_VAL("Unmatched nr hist entries for thread filter",
hists->nr_non_filtered_entries == 4);
TEST_ASSERT_VAL("Unmatched total period for thread filter",
hists->stats.total_non_filtered_period == 400);
/* remove thread filter first */
hists->thread_filter = NULL;
hists__filter_by_thread(hists);
/* now applying dso filter for 'kernel' */
hists->dso_filter = map__dso(fake_samples[0].map);
hists__filter_by_dso(hists);
if (verbose > 2) {
pr_info("Histogram for dso filter\n");
print_hists_out(hists);
}
/* normal stats should be invariant */
TEST_ASSERT_VAL("Invalid nr samples",
hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
hists->stats.total_period == 1000);
/* but filter stats are changed */
TEST_ASSERT_VAL("Unmatched nr samples for dso filter",
hists->stats.nr_non_filtered_samples == 3);
TEST_ASSERT_VAL("Unmatched nr hist entries for dso filter",
hists->nr_non_filtered_entries == 3);
TEST_ASSERT_VAL("Unmatched total period for dso filter",
hists->stats.total_non_filtered_period == 300);
/* remove dso filter first */
hists->dso_filter = NULL;
hists__filter_by_dso(hists);
/*
* now applying symbol filter for 'main'. Also note that
* there are 3 samples that have the 'main' symbol, but the 4th
* entry of fake_samples was already collapsed so it won't
* be counted as a separate entry; its sample count and
* total period are still retained.
*/
hists->symbol_filter_str = "main";
hists__filter_by_symbol(hists);
if (verbose > 2) {
pr_info("Histogram for symbol filter\n");
print_hists_out(hists);
}
/* normal stats should be invariant */
TEST_ASSERT_VAL("Invalid nr samples",
hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
hists->stats.total_period == 1000);
/* but filter stats are changed */
TEST_ASSERT_VAL("Unmatched nr samples for symbol filter",
hists->stats.nr_non_filtered_samples == 3);
TEST_ASSERT_VAL("Unmatched nr hist entries for symbol filter",
hists->nr_non_filtered_entries == 2);
TEST_ASSERT_VAL("Unmatched total period for symbol filter",
hists->stats.total_non_filtered_period == 300);
/* remove symbol filter first */
hists->symbol_filter_str = NULL;
hists__filter_by_symbol(hists);
/* now applying socket filters */
hists->socket_filter = 2;
hists__filter_by_socket(hists);
if (verbose > 2) {
pr_info("Histogram for socket filters\n");
print_hists_out(hists);
}
/* normal stats should be invariant */
TEST_ASSERT_VAL("Invalid nr samples",
hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
hists->stats.total_period == 1000);
/* but filter stats are changed */
TEST_ASSERT_VAL("Unmatched nr samples for socket filter",
hists->stats.nr_non_filtered_samples == 2);
TEST_ASSERT_VAL("Unmatched nr hist entries for socket filter",
hists->nr_non_filtered_entries == 2);
TEST_ASSERT_VAL("Unmatched total period for socket filter",
hists->stats.total_non_filtered_period == 200);
/* remove socket filter first */
hists->socket_filter = -1;
hists__filter_by_socket(hists);
/* now applying all filters at once. */
hists->thread_filter = fake_samples[1].thread;
hists->dso_filter = map__dso(fake_samples[1].map);
hists__filter_by_thread(hists);
hists__filter_by_dso(hists);
if (verbose > 2) {
pr_info("Histogram for all filters\n");
print_hists_out(hists);
}
/* normal stats should be invariant */
TEST_ASSERT_VAL("Invalid nr samples",
hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
hists->stats.total_period == 1000);
/* but filter stats are changed */
TEST_ASSERT_VAL("Unmatched nr samples for all filter",
hists->stats.nr_non_filtered_samples == 2);
TEST_ASSERT_VAL("Unmatched nr hist entries for all filter",
hists->nr_non_filtered_entries == 1);
TEST_ASSERT_VAL("Unmatched total period for all filter",
hists->stats.total_non_filtered_period == 200);
}
err = TEST_OK;
out:
/* tear down everything */
evlist__delete(evlist);
reset_output_field();
machines__exit(&machines);
put_fake_samples();
return err;
}
DEFINE_SUITE("Filter hist entries", hists_filter);
| linux-master | tools/perf/tests/hists_filter.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include "debug.h"
#include "event.h"
#include "tests.h"
#include "machine.h"
#include "thread_map.h"
#include "map.h"
#include "symbol.h"
#include "util/synthetic-events.h"
#include "thread.h"
#include <internal/lib.h> // page_size
#define THREADS 4
static int go_away;
struct thread_data {
pthread_t pt;
pid_t tid;
void *map;
int ready[2];
};
static struct thread_data threads[THREADS];
static int thread_init(struct thread_data *td)
{
void *map;
map = mmap(NULL, page_size,
PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_SHARED|MAP_ANONYMOUS, -1, 0);
if (map == MAP_FAILED) {
perror("mmap failed");
return -1;
}
td->map = map;
td->tid = syscall(SYS_gettid);
pr_debug("tid = %d, map = %p\n", td->tid, map);
return 0;
}
static void *thread_fn(void *arg)
{
struct thread_data *td = arg;
ssize_t ret;
int go = 0;
if (thread_init(td))
return NULL;
/* Signal thread_create thread is initialized. */
ret = write(td->ready[1], &go, sizeof(int));
if (ret != sizeof(int)) {
pr_err("failed to notify\n");
return NULL;
}
while (!go_away) {
/* Waiting for main thread to kill us. */
usleep(100);
}
munmap(td->map, page_size);
return NULL;
}
static int thread_create(int i)
{
struct thread_data *td = &threads[i];
int err, go;
if (pipe(td->ready))
return -1;
err = pthread_create(&td->pt, NULL, thread_fn, td);
if (!err) {
/* Wait for thread initialization. */
ssize_t ret = read(td->ready[0], &go, sizeof(int));
err = ret != sizeof(int);
}
close(td->ready[0]);
close(td->ready[1]);
return err;
}
static int threads_create(void)
{
struct thread_data *td0 = &threads[0];
int i, err = 0;
go_away = 0;
/* 0 is main thread */
if (thread_init(td0))
return -1;
for (i = 1; !err && i < THREADS; i++)
err = thread_create(i);
return err;
}
static int threads_destroy(void)
{
struct thread_data *td0 = &threads[0];
int i, err = 0;
/* cleanup the main thread */
munmap(td0->map, page_size);
go_away = 1;
for (i = 1; !err && i < THREADS; i++)
err = pthread_join(threads[i].pt, NULL);
return err;
}
typedef int (*synth_cb)(struct machine *machine);
static int synth_all(struct machine *machine)
{
return perf_event__synthesize_threads(NULL,
perf_event__process,
machine, 1, 0, 1);
}
static int synth_process(struct machine *machine)
{
struct perf_thread_map *map;
int err;
map = thread_map__new_by_pid(getpid());
err = perf_event__synthesize_thread_map(NULL, map,
perf_event__process,
machine, 1, 0);
perf_thread_map__put(map);
return err;
}
static int mmap_events(synth_cb synth)
{
struct machine *machine;
int err, i;
/*
* threads_create() will not return before all threads are
* spawned and each of them has created its memory map.
*
* They will loop until threads_destroy is called, so we
* can safely run synthesizing function.
*/
TEST_ASSERT_VAL("failed to create threads", !threads_create());
machine = machine__new_host();
dump_trace = verbose > 1 ? 1 : 0;
err = synth(machine);
dump_trace = 0;
TEST_ASSERT_VAL("failed to destroy threads", !threads_destroy());
TEST_ASSERT_VAL("failed to synthesize maps", !err);
/*
* All data is synthesized, try to find map for each
* thread object.
*/
for (i = 0; i < THREADS; i++) {
struct thread_data *td = &threads[i];
struct addr_location al;
struct thread *thread;
addr_location__init(&al);
thread = machine__findnew_thread(machine, getpid(), td->tid);
pr_debug("looking for map %p\n", td->map);
thread__find_map(thread, PERF_RECORD_MISC_USER,
(unsigned long) (td->map + 1), &al);
thread__put(thread);
if (!al.map) {
pr_debug("failed, couldn't find map\n");
err = -1;
addr_location__exit(&al);
break;
}
pr_debug("map %p, addr %" PRIx64 "\n", al.map, map__start(al.map));
addr_location__exit(&al);
}
machine__delete(machine);
return err;
}
/*
* This test creates 'THREADS' threads (including the
* main thread) and each thread creates a memory map.
*
* When threads are created, we synthesize them with both
* (separate tests):
* perf_event__synthesize_thread_map (process based)
* perf_event__synthesize_threads (global)
*
* We test we can find all memory maps via:
* thread__find_map
*
* by using all thread objects.
*/
static int test__mmap_thread_lookup(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
/* perf_event__synthesize_threads synthesize */
TEST_ASSERT_VAL("failed with sythesizing all",
!mmap_events(synth_all));
/* perf_event__synthesize_thread_map synthesize */
TEST_ASSERT_VAL("failed with sythesizing process",
!mmap_events(synth_process));
return 0;
}
DEFINE_SUITE("Lookup mmap thread", mmap_thread_lookup);
| linux-master | tools/perf/tests/mmap-thread-lookup.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/string.h>
#include <sched.h>
#include <perf/mmap.h>
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "record.h"
#include "tests.h"
#include "util/mmap.h"
#include "util/sample.h"
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
int i, cpu = -1, nrcpus = 1024;
realloc:
CPU_ZERO(maskp);
if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
if (errno == EINVAL && nrcpus < (1024 << 8)) {
nrcpus = nrcpus << 2;
goto realloc;
}
perror("sched_getaffinity");
return -1;
}
for (i = 0; i < nrcpus; i++) {
if (CPU_ISSET(i, maskp)) {
if (cpu == -1)
cpu = i;
else
CPU_CLR(i, maskp);
}
}
return cpu;
}
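/*
 * cpu_set_t is a fixed 1024-bit mask, so the retry above cannot actually grow
 * what is handed to the kernel.  For machines with more CPUs, glibc offers
 * dynamically sized sets (CPU_ALLOC and friends).  The sketch below shows
 * that alternative; it is illustrative only and compiled out with #if 0.
 */
#if 0
#define _GNU_SOURCE		/* must precede the includes in a real build */
#include <sched.h>
#include <errno.h>
#include <sys/types.h>

static int first_possible_cpu(pid_t pid)
{
	int ncpus = 1024;

	for (;;) {
		cpu_set_t *set = CPU_ALLOC(ncpus);
		size_t size = CPU_ALLOC_SIZE(ncpus);
		int i;

		if (!set)
			return -1;
		CPU_ZERO_S(size, set);
		if (sched_getaffinity(pid, size, set) == 0) {
			for (i = 0; i < ncpus; i++) {
				if (CPU_ISSET_S(i, size, set))
					break;
			}
			CPU_FREE(set);
			return i < ncpus ? i : -1;
		}
		CPU_FREE(set);
		if (errno != EINVAL)
			return -1;
		ncpus <<= 1;	/* the kernel mask is larger than ours, retry bigger */
	}
}
#endif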
static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct record_opts opts = {
.target = {
.uid = UINT_MAX,
.uses_mmap = true,
},
.no_buffering = true,
.mmap_pages = 256,
};
cpu_set_t cpu_mask;
size_t cpu_mask_size = sizeof(cpu_mask);
struct evlist *evlist = evlist__new_dummy();
struct evsel *evsel;
struct perf_sample sample;
const char *cmd = "sleep";
const char *argv[] = { cmd, "1", NULL, };
char *bname, *mmap_filename;
u64 prev_time = 0;
bool found_cmd_mmap = false,
found_coreutils_mmap = false,
found_libc_mmap = false,
found_vdso_mmap = false,
found_ld_mmap = false;
int err = -1, errs = 0, i, wakeups = 0;
u32 cpu;
int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
char sbuf[STRERR_BUFSIZE];
if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
evlist = evlist__new_default();
if (evlist == NULL) {
pr_debug("Not enough memory to create evlist\n");
goto out;
}
/*
* Create maps of threads and cpus to monitor. In this case
* we start with all threads and cpus (-1, -1) but then in
* evlist__prepare_workload we'll fill in the only thread
* we're monitoring, the one forked there.
*/
err = evlist__create_maps(evlist, &opts.target);
if (err < 0) {
pr_debug("Not enough memory to create thread/cpu maps\n");
goto out_delete_evlist;
}
/*
* Prepare the workload in argv[] to run, it'll fork it, and then wait
* for evlist__start_workload() to exec it. This is done this way
* so that we have time to open the evlist (calling sys_perf_event_open
* on all the fds) and then mmap them.
*/
err = evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
if (err < 0) {
pr_debug("Couldn't run the workload!\n");
goto out_delete_evlist;
}
/*
* Config the evsels, setting attr->comm on the first one, etc.
*/
evsel = evlist__first(evlist);
evsel__set_sample_bit(evsel, CPU);
evsel__set_sample_bit(evsel, TID);
evsel__set_sample_bit(evsel, TIME);
evlist__config(evlist, &opts, NULL);
err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
if (err < 0) {
pr_debug("sched__get_first_possible_cpu: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
cpu = err;
/*
* So that we can check perf_sample.cpu on all the samples.
*/
if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
pr_debug("sched_setaffinity: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
/*
* Call sys_perf_event_open on all the fds on all the evsels,
* grouping them if asked to.
*/
err = evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
/*
* mmap the first fd on a given CPU and ask for events for the other
* fds in the same CPU to be injected in the same mmap ring buffer
* (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
*/
err = evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
/*
* Now that all is properly set up, enable the events, they will
* count just on workload.pid, which will start...
*/
evlist__enable(evlist);
/*
* Now!
*/
evlist__start_workload(evlist);
while (1) {
int before = total_events;
for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
struct mmap *md;
md = &evlist->mmap[i];
if (perf_mmap__read_init(&md->core) < 0)
continue;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
const u32 type = event->header.type;
const char *name = perf_event__name(type);
++total_events;
if (type < PERF_RECORD_MAX)
nr_events[type]++;
err = evlist__parse_sample(evlist, event, &sample);
if (err < 0) {
if (verbose > 0)
perf_event__fprintf(event, NULL, stderr);
pr_debug("Couldn't parse sample\n");
goto out_delete_evlist;
}
if (verbose > 0) {
pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
perf_event__fprintf(event, NULL, stderr);
}
if (prev_time > sample.time) {
pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
name, prev_time, sample.time);
++errs;
}
prev_time = sample.time;
if (sample.cpu != cpu) {
pr_debug("%s with unexpected cpu, expected %d, got %d\n",
name, cpu, sample.cpu);
++errs;
}
if ((pid_t)sample.pid != evlist->workload.pid) {
pr_debug("%s with unexpected pid, expected %d, got %d\n",
name, evlist->workload.pid, sample.pid);
++errs;
}
if ((pid_t)sample.tid != evlist->workload.pid) {
pr_debug("%s with unexpected tid, expected %d, got %d\n",
name, evlist->workload.pid, sample.tid);
++errs;
}
if ((type == PERF_RECORD_COMM ||
type == PERF_RECORD_MMAP ||
type == PERF_RECORD_MMAP2 ||
type == PERF_RECORD_FORK ||
type == PERF_RECORD_EXIT) &&
(pid_t)event->comm.pid != evlist->workload.pid) {
pr_debug("%s with unexpected pid/tid\n", name);
++errs;
}
if ((type == PERF_RECORD_COMM ||
type == PERF_RECORD_MMAP ||
type == PERF_RECORD_MMAP2) &&
event->comm.pid != event->comm.tid) {
pr_debug("%s with different pid/tid!\n", name);
++errs;
}
switch (type) {
case PERF_RECORD_COMM:
if (strcmp(event->comm.comm, cmd)) {
pr_debug("%s with unexpected comm!\n", name);
++errs;
}
break;
case PERF_RECORD_EXIT:
goto found_exit;
case PERF_RECORD_MMAP:
mmap_filename = event->mmap.filename;
goto check_bname;
case PERF_RECORD_MMAP2:
mmap_filename = event->mmap2.filename;
check_bname:
bname = strrchr(mmap_filename, '/');
if (bname != NULL) {
if (!found_cmd_mmap)
found_cmd_mmap = !strcmp(bname + 1, cmd);
if (!found_coreutils_mmap)
found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
if (!found_libc_mmap)
found_libc_mmap = !strncmp(bname + 1, "libc", 4);
if (!found_ld_mmap)
found_ld_mmap = !strncmp(bname + 1, "ld", 2);
} else if (!found_vdso_mmap)
found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
break;
case PERF_RECORD_SAMPLE:
/* Just ignore samples for now */
break;
default:
pr_debug("Unexpected perf_event->header.type %d!\n",
type);
++errs;
}
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(&md->core);
}
/*
* We don't use poll here because, at least as of kernel 3.1, the
* PERF_RECORD_{!SAMPLE} events don't honour
* perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
*/
if (total_events == before && false)
evlist__poll(evlist, -1);
sleep(1);
if (++wakeups > 5) {
pr_debug("No PERF_RECORD_EXIT event!\n");
break;
}
}
found_exit:
if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
++errs;
}
if (nr_events[PERF_RECORD_COMM] == 0) {
pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
++errs;
}
if (!found_cmd_mmap && !found_coreutils_mmap) {
pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
++errs;
}
if (!found_libc_mmap) {
pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
++errs;
}
if (!found_ld_mmap) {
pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
++errs;
}
if (!found_vdso_mmap) {
pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
++errs;
}
out_delete_evlist:
evlist__delete(evlist);
out:
if (err == -EACCES)
return TEST_SKIP;
if (err < 0 || errs != 0)
return TEST_FAIL;
return TEST_OK;
}
static struct test_case tests__PERF_RECORD[] = {
TEST_CASE_REASON("PERF_RECORD_* events & perf_sample fields",
PERF_RECORD,
"permissions"),
{ .name = NULL, }
};
struct test_suite suite__PERF_RECORD = {
.desc = "PERF_RECORD_* events & perf_sample fields",
.test_cases = tests__PERF_RECORD,
};
| linux-master | tools/perf/tests/perf-record.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include <inttypes.h>
#include <limits.h>
#include <unistd.h>
#include "tests.h"
#include "debug.h"
#include "machine.h"
#include "event.h"
#include "../util/unwind.h"
#include "perf_regs.h"
#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "callchain.h"
#include "util/synthetic-events.h"
/* For bsearch. We try to unwind functions in shared object. */
#include <stdlib.h>
/*
* The test will assert frames are on the stack but tail call optimizations lose
* the frame of the caller. Clang can disable this optimization on a called
* function but GCC currently (11/2020) lacks this attribute. The barrier is
* used to inhibit tail calls in these cases.
*/
#ifdef __has_attribute
#if __has_attribute(disable_tail_calls)
#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
#define NO_TAIL_CALL_BARRIER
#endif
#endif
#ifndef NO_TAIL_CALL_ATTRIBUTE
#define NO_TAIL_CALL_ATTRIBUTE
#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
#endif
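/*
 * A minimal illustration of why the barrier is needed: with optimization,
 * "return callee(x);" may be emitted as a plain jump, so the caller's frame
 * never shows up in an unwound stack, while an empty asm statement after the
 * call keeps the frame alive.  Illustrative only, compiled out with #if 0.
 */
#if 0
static int callee(int x) { return x + 1; }

static int caller_tail(int x)
{
	return callee(x);		/* may become a tail call (jmp callee) */
}

static int caller_kept(int x)
{
	int r = callee(x);

	__asm__ __volatile__("" : : : "memory");	/* same trick as NO_TAIL_CALL_BARRIER */
	return r;
}
#endif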
static int mmap_handler(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
return machine__process_mmap2_event(machine, event, sample);
}
static int init_live_machine(struct machine *machine)
{
union perf_event event;
pid_t pid = getpid();
memset(&event, 0, sizeof(event));
return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
mmap_handler, machine, true);
}
/*
* We need to keep these functions global, despite the
* fact that they are used only locally in this object,
* in order to keep them around even if the binary is
* stripped. If they are gone, the unwind check for
* symbol fails.
*/
int test_dwarf_unwind__thread(struct thread *thread);
int test_dwarf_unwind__compare(void *p1, void *p2);
int test_dwarf_unwind__krava_3(struct thread *thread);
int test_dwarf_unwind__krava_2(struct thread *thread);
int test_dwarf_unwind__krava_1(struct thread *thread);
int test__dwarf_unwind(struct test_suite *test, int subtest);
#define MAX_STACK 8
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
unsigned long *cnt = (unsigned long *) arg;
char *symbol = entry->ms.sym ? entry->ms.sym->name : NULL;
static const char *funcs[MAX_STACK] = {
"test__arch_unwind_sample",
"test_dwarf_unwind__thread",
"test_dwarf_unwind__compare",
"bsearch",
"test_dwarf_unwind__krava_3",
"test_dwarf_unwind__krava_2",
"test_dwarf_unwind__krava_1",
"test__dwarf_unwind"
};
/*
* The funcs[MAX_STACK] array index, based on the
* callchain order setup.
*/
int idx = callchain_param.order == ORDER_CALLER ?
MAX_STACK - *cnt - 1 : *cnt;
if (*cnt >= MAX_STACK) {
pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
return -1;
}
if (!symbol) {
pr_debug("failed: got unresolved address 0x%" PRIx64 "\n",
entry->ip);
return -1;
}
(*cnt)++;
pr_debug("got: %s 0x%" PRIx64 ", expecting %s\n",
symbol, entry->ip, funcs[idx]);
return strcmp((const char *) symbol, funcs[idx]);
}
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
{
struct perf_sample sample;
unsigned long cnt = 0;
int err = -1;
memset(&sample, 0, sizeof(sample));
if (test__arch_unwind_sample(&sample, thread)) {
pr_debug("failed to get unwind sample\n");
goto out;
}
err = unwind__get_entries(unwind_entry, &cnt, thread,
&sample, MAX_STACK, false);
if (err)
pr_debug("unwind failed\n");
else if (cnt != MAX_STACK) {
pr_debug("got wrong number of stack entries %lu != %d\n",
cnt, MAX_STACK);
err = -1;
}
out:
zfree(&sample.user_stack.data);
zfree(&sample.user_regs.regs);
return err;
}
static int global_unwind_retval = -INT_MAX;
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
{
/* Any possible value should be 'thread' */
struct thread *thread = *(struct thread **)p1;
if (global_unwind_retval == -INT_MAX) {
/* Call unwinder twice for both callchain orders. */
callchain_param.order = ORDER_CALLER;
global_unwind_retval = test_dwarf_unwind__thread(thread);
if (!global_unwind_retval) {
callchain_param.order = ORDER_CALLEE;
global_unwind_retval = test_dwarf_unwind__thread(thread);
}
}
return p1 - p2;
}
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
{
struct thread *array[2] = {thread, thread};
void *fp = &bsearch;
/*
	 * Make _bsearch a volatile function pointer to prevent a
	 * potential optimization that could inline bsearch and call
	 * the compare function directly from this function, instead
	 * of going through the libc shared object.
*/
void *(*volatile _bsearch)(void *, void *, size_t,
size_t, int (*)(void *, void *));
_bsearch = fp;
_bsearch(array, &thread, 2, sizeof(struct thread **),
test_dwarf_unwind__compare);
return global_unwind_retval;
}
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
{
int ret;
ret = test_dwarf_unwind__krava_3(thread);
NO_TAIL_CALL_BARRIER;
return ret;
}
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
{
int ret;
ret = test_dwarf_unwind__krava_2(thread);
NO_TAIL_CALL_BARRIER;
return ret;
}
noinline int test__dwarf_unwind(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
struct machine *machine;
struct thread *thread;
int err = -1;
machine = machine__new_host();
if (!machine) {
pr_err("Could not get machine\n");
return -1;
}
if (machine__create_kernel_maps(machine)) {
pr_err("Failed to create kernel maps\n");
return -1;
}
callchain_param.record_mode = CALLCHAIN_DWARF;
dwarf_callchain_users = true;
if (init_live_machine(machine)) {
pr_err("Could not init machine\n");
goto out;
}
if (verbose > 1)
machine__fprintf(machine, stderr);
thread = machine__find_thread(machine, getpid(), getpid());
if (!thread) {
pr_err("Could not get thread\n");
goto out;
}
err = test_dwarf_unwind__krava_1(thread);
thread__put(thread);
out:
machine__delete(machine);
return err;
}
DEFINE_SUITE("Test dwarf unwind", dwarf_unwind);
| linux-master | tools/perf/tests/dwarf-unwind.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <string.h>
#include "tests.h"
#include "units.h"
#include "debug.h"
static int test__unit_number__scnprint(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
{
struct {
u64 n;
const char *str;
} test[] = {
{ 1, "1B" },
{ 10*1024, "10K" },
{ 20*1024*1024, "20M" },
{ 30*1024*1024*1024ULL, "30G" },
{ 0, "0B" },
{ 0, NULL },
};
unsigned i = 0;
while (test[i].str) {
char buf[100];
unit_number__scnprintf(buf, sizeof(buf), test[i].n);
pr_debug("n %" PRIu64 ", str '%s', buf '%s'\n",
test[i].n, test[i].str, buf);
if (strcmp(test[i].str, buf))
return TEST_FAIL;
i++;
}
return TEST_OK;
}
DEFINE_SUITE("unit_number__scnprintf", unit_number__scnprint);
| linux-master | tools/perf/tests/unit_number__scnprintf.c |
// SPDX-License-Identifier: GPL-2.0
#include "tests.h"
#include "machine.h"
#include "thread.h"
#include "debug.h"
static int test__thread_maps_share(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct machines machines;
struct machine *machine;
/* thread group */
struct thread *leader;
struct thread *t1, *t2, *t3;
struct maps *maps;
/* other process */
struct thread *other, *other_leader;
struct maps *other_maps;
/*
	 * This test creates 2 process abstractions (struct thread)
	 * with several threads and checks that they properly share
	 * and maintain maps info (struct maps).
*
* thread group (pid: 0, tids: 0, 1, 2, 3)
* other group (pid: 4, tids: 4, 5)
*/
machines__init(&machines);
machine = &machines.host;
/* create process with 4 threads */
leader = machine__findnew_thread(machine, 0, 0);
t1 = machine__findnew_thread(machine, 0, 1);
t2 = machine__findnew_thread(machine, 0, 2);
t3 = machine__findnew_thread(machine, 0, 3);
	/* and create 1 separate process, without a thread leader */
other = machine__findnew_thread(machine, 4, 5);
TEST_ASSERT_VAL("failed to create threads",
leader && t1 && t2 && t3 && other);
maps = thread__maps(leader);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 4);
/* test the maps pointer is shared */
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t1)));
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t2)));
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t3)));
/*
	 * Verify the other leader was created by the previous call.
* It should have shared maps with no change in
* refcnt.
*/
other_leader = machine__find_thread(machine, 4, 4);
TEST_ASSERT_VAL("failed to find other leader", other_leader);
/*
	 * OK, now that all the rbtree related operations are done,
	 * let's remove all of them from there so that we can do the
	 * refcounting tests.
*/
machine__remove_thread(machine, leader);
machine__remove_thread(machine, t1);
machine__remove_thread(machine, t2);
machine__remove_thread(machine, t3);
machine__remove_thread(machine, other);
machine__remove_thread(machine, other_leader);
other_maps = thread__maps(other);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(other_maps)), 2);
TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(other_maps) ==
RC_CHK_ACCESS(thread__maps(other_leader)));
/* release thread group */
thread__put(t3);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 3);
thread__put(t2);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 2);
thread__put(t1);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 1);
thread__put(leader);
/* release other group */
thread__put(other_leader);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(other_maps)), 1);
thread__put(other);
machines__exit(&machines);
return 0;
}
DEFINE_SUITE("Share thread maps", thread_maps_share);
| linux-master | tools/perf/tests/thread-maps-share.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "event.h"
#include "record.h"
#include "util/mmap.h"
#include "util/string2.h"
#include "util/synthetic-events.h"
#include "util/util.h"
#include "thread.h"
#include "tests.h"
#include <linux/ctype.h>
#define BUFSZ 1024
#define READLEN 128
struct state {
u64 done[1024];
size_t done_cnt;
};
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
size_t *buf_len)
{
size_t bytes_read = 0;
unsigned char *chunk_start = *buf;
/* Read bytes */
while (*buf_len > 0) {
char c1, c2;
/* Get 2 hex digits */
c1 = *(*line)++;
if (!isxdigit(c1))
break;
c2 = *(*line)++;
if (!isxdigit(c2))
break;
/* Store byte and advance buf */
**buf = (hex(c1) << 4) | hex(c2);
(*buf)++;
(*buf_len)--;
bytes_read++;
/* End of chunk? */
if (isspace(**line))
break;
}
/*
* objdump will display raw insn as LE if code endian
* is LE and bytes_per_chunk > 1. In that case reverse
* the chunk we just read.
*
	 * See disassemble_bytes() in binutils/objdump.c for details
	 * on how objdump chooses the display endian.
*/
if (bytes_read > 1 && !host_is_bigendian()) {
unsigned char *chunk_end = chunk_start + bytes_read - 1;
unsigned char tmp;
while (chunk_start < chunk_end) {
tmp = *chunk_start;
*chunk_start = *chunk_end;
*chunk_end = tmp;
chunk_start++;
chunk_end--;
}
}
return bytes_read;
}
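/*
 * Parse a single line of objdump output, which looks roughly like
 *   "  4004d6:  55                   push   %rbp"
 * i.e. skip everything up to and including the address and colon, then
 * decode the hex byte chunks that follow into 'buf'.
 */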
static size_t read_objdump_line(const char *line, unsigned char *buf,
size_t buf_len)
{
const char *p;
size_t ret, bytes_read = 0;
/* Skip to a colon */
p = strchr(line, ':');
if (!p)
return 0;
p++;
/* Skip initial spaces */
while (*p) {
if (!isspace(*p))
break;
p++;
}
do {
ret = read_objdump_chunk(&p, &buf, &buf_len);
bytes_read += ret;
p++;
} while (ret > 0);
/* return number of successfully read bytes */
return bytes_read;
}
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
char *line = NULL;
size_t line_len, off_last = 0;
ssize_t ret;
int err = 0;
u64 addr, last_addr = start_addr;
while (off_last < *len) {
size_t off, read_bytes, written_bytes;
unsigned char tmp[BUFSZ];
ret = getline(&line, &line_len, f);
if (feof(f))
break;
if (ret < 0) {
pr_debug("getline failed\n");
err = -1;
break;
}
/* read objdump data into temporary buffer */
read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
if (!read_bytes)
continue;
if (sscanf(line, "%"PRIx64, &addr) != 1)
continue;
if (addr < last_addr) {
pr_debug("addr going backwards, read beyond section?\n");
break;
}
last_addr = addr;
		/* copy it from the temporary buffer to 'buf' according
		 * to the address on the current objdump line */
off = addr - start_addr;
if (off >= *len)
break;
written_bytes = MIN(read_bytes, *len - off);
memcpy(buf + off, tmp, written_bytes);
off_last = off + written_bytes;
}
/* len returns number of bytes that could not be read */
*len -= off_last;
free(line);
return err;
}
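/*
 * Run objdump over the given address range and collect the raw instruction
 * bytes it prints. The -z switch keeps blocks of zeroes in the output
 * instead of skipping them.
 */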
static int read_via_objdump(const char *filename, u64 addr, void *buf,
size_t len)
{
char cmd[PATH_MAX * 2];
const char *fmt;
FILE *f;
int ret;
fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
filename);
if (ret <= 0 || (size_t)ret >= sizeof(cmd))
return -1;
pr_debug("Objdump command is: %s\n", cmd);
/* Ignore objdump errors */
strcat(cmd, " 2>/dev/null");
f = popen(cmd, "r");
if (!f) {
pr_debug("popen failed\n");
return -1;
}
ret = read_objdump_output(f, buf, &len, addr);
if (len) {
pr_debug("objdump read too few bytes: %zd\n", len);
if (!ret)
ret = len;
}
pclose(f);
return ret;
}
static void dump_buf(unsigned char *buf, size_t len)
{
size_t i;
for (i = 0; i < len; i++) {
pr_debug("0x%02x ", buf[i]);
if (i % 16 == 15)
pr_debug("\n");
}
pr_debug("\n");
}
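/*
 * Read 'len' bytes of object code at 'addr' twice, once through perf's
 * dso__data_read_offset() and once by running objdump on the backing file,
 * and check that both reads return the same bytes.
 */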
static int read_object_code(u64 addr, size_t len, u8 cpumode,
struct thread *thread, struct state *state)
{
struct addr_location al;
unsigned char buf1[BUFSZ] = {0};
unsigned char buf2[BUFSZ] = {0};
size_t ret_len;
u64 objdump_addr;
const char *objdump_name;
char decomp_name[KMOD_DECOMP_LEN];
bool decomp = false;
int ret, err = 0;
struct dso *dso;
pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
addr_location__init(&al);
if (!thread__find_map(thread, cpumode, addr, &al) || !map__dso(al.map)) {
if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
pr_debug("Hypervisor address can not be resolved - skipping\n");
goto out;
}
pr_debug("thread__find_map failed\n");
err = -1;
goto out;
}
dso = map__dso(al.map);
pr_debug("File is: %s\n", dso->long_name);
if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
pr_debug("Unexpected kernel address - skipping\n");
goto out;
}
pr_debug("On file address is: %#"PRIx64"\n", al.addr);
if (len > BUFSZ)
len = BUFSZ;
/* Do not go off the map */
if (addr + len > map__end(al.map))
len = map__end(al.map) - addr;
/* Read the object code using perf */
ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),
al.addr, buf1, len);
if (ret_len != len) {
pr_debug("dso__data_read_offset failed\n");
err = -1;
goto out;
}
/*
* Converting addresses for use by objdump requires more information.
* map__load() does that. See map__rip_2objdump() for details.
*/
if (map__load(al.map)) {
err = -1;
goto out;
}
/* objdump struggles with kcore - try each map only once */
if (dso__is_kcore(dso)) {
size_t d;
for (d = 0; d < state->done_cnt; d++) {
if (state->done[d] == map__start(al.map)) {
pr_debug("kcore map tested already");
pr_debug(" - skipping\n");
goto out;
}
}
if (state->done_cnt >= ARRAY_SIZE(state->done)) {
pr_debug("Too many kcore maps - skipping\n");
goto out;
}
state->done[state->done_cnt++] = map__start(al.map);
}
objdump_name = dso->long_name;
if (dso__needs_decompress(dso)) {
if (dso__decompress_kmodule_path(dso, objdump_name,
decomp_name,
sizeof(decomp_name)) < 0) {
pr_debug("decompression failed\n");
err = -1;
goto out;
}
decomp = true;
objdump_name = decomp_name;
}
/* Read the object code using objdump */
objdump_addr = map__rip_2objdump(al.map, al.addr);
ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
if (decomp)
unlink(objdump_name);
if (ret > 0) {
/*
* The kernel maps are inaccurate - assume objdump is right in
* that case.
*/
if (cpumode == PERF_RECORD_MISC_KERNEL ||
cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
len -= ret;
if (len) {
pr_debug("Reducing len to %zu\n", len);
} else if (dso__is_kcore(dso)) {
/*
* objdump cannot handle very large segments
* that may be found in kcore.
*/
pr_debug("objdump failed for kcore");
pr_debug(" - skipping\n");
} else {
err = -1;
}
goto out;
}
}
if (ret < 0) {
pr_debug("read_via_objdump failed\n");
err = -1;
goto out;
}
/* The results should be identical */
if (memcmp(buf1, buf2, len)) {
pr_debug("Bytes read differ from those read by objdump\n");
pr_debug("buf1 (dso):\n");
dump_buf(buf1, len);
pr_debug("buf2 (objdump):\n");
dump_buf(buf2, len);
err = -1;
goto out;
}
pr_debug("Bytes read match those read by objdump\n");
out:
addr_location__exit(&al);
return err;
}
static int process_sample_event(struct machine *machine,
struct evlist *evlist,
union perf_event *event, struct state *state)
{
struct perf_sample sample;
struct thread *thread;
int ret;
if (evlist__parse_sample(evlist, event, &sample)) {
pr_debug("evlist__parse_sample failed\n");
return -1;
}
thread = machine__findnew_thread(machine, sample.pid, sample.tid);
if (!thread) {
pr_debug("machine__findnew_thread failed\n");
return -1;
}
ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);
thread__put(thread);
return ret;
}
static int process_event(struct machine *machine, struct evlist *evlist,
union perf_event *event, struct state *state)
{
if (event->header.type == PERF_RECORD_SAMPLE)
return process_sample_event(machine, evlist, event, state);
if (event->header.type == PERF_RECORD_THROTTLE ||
event->header.type == PERF_RECORD_UNTHROTTLE)
return 0;
if (event->header.type < PERF_RECORD_MAX) {
int ret;
ret = machine__process_event(machine, event, NULL);
if (ret < 0)
pr_debug("machine__process_event failed, event type %u\n",
event->header.type);
return ret;
}
return 0;
}
static int process_events(struct machine *machine, struct evlist *evlist,
struct state *state)
{
union perf_event *event;
struct mmap *md;
int i, ret;
for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(&md->core) < 0)
continue;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
ret = process_event(machine, evlist, event, state);
perf_mmap__consume(&md->core);
if (ret < 0)
return ret;
}
perf_mmap__read_done(&md->core);
}
return 0;
}
static int comp(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
static void do_sort_something(void)
{
int buf[40960], i;
for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
buf[i] = ARRAY_SIZE(buf) - i - 1;
qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);
for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
if (buf[i] != i) {
pr_debug("qsort failed\n");
break;
}
}
}
static void sort_something(void)
{
int i;
for (i = 0; i < 10; i++)
do_sort_something();
}
static void syscall_something(void)
{
int pipefd[2];
int i;
for (i = 0; i < 1000; i++) {
if (pipe(pipefd) < 0) {
pr_debug("pipe failed\n");
break;
}
close(pipefd[1]);
close(pipefd[0]);
}
}
static void fs_something(void)
{
const char *test_file_name = "temp-perf-code-reading-test-file--";
FILE *f;
int i;
for (i = 0; i < 1000; i++) {
f = fopen(test_file_name, "w+");
if (f) {
fclose(f);
unlink(test_file_name);
}
}
}
#ifdef __s390x__
#include "header.h" // for get_cpuid()
#endif
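/*
 * On s390x the cycles counter needs CPU-MF authorization, which is encoded
 * in the cpuid string parsed below; fall back to cpu-clock when the needed
 * authorization bits are not set. Other architectures simply use cycles.
 */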
static const char *do_determine_event(bool excl_kernel)
{
const char *event = excl_kernel ? "cycles:u" : "cycles";
#ifdef __s390x__
char cpuid[128], model[16], model_c[16], cpum_cf_v[16];
unsigned int family;
int ret, cpum_cf_a;
if (get_cpuid(cpuid, sizeof(cpuid)))
goto out_clocks;
ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c,
model, cpum_cf_v, &cpum_cf_a);
if (ret != 5) /* Not available */
goto out_clocks;
if (excl_kernel && (cpum_cf_a & 4))
return event;
if (!excl_kernel && (cpum_cf_a & 2))
return event;
/* Fall through: missing authorization */
out_clocks:
event = excl_kernel ? "cpu-clock:u" : "cpu-clock";
#endif
return event;
}
static void do_something(void)
{
fs_something();
sort_something();
syscall_something();
}
enum {
TEST_CODE_READING_OK,
TEST_CODE_READING_NO_VMLINUX,
TEST_CODE_READING_NO_KCORE,
TEST_CODE_READING_NO_ACCESS,
TEST_CODE_READING_NO_KERNEL_OBJ,
};
static int do_test_code_reading(bool try_kcore)
{
struct machine *machine;
struct thread *thread;
struct record_opts opts = {
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.freq = 500,
.target = {
.uses_mmap = true,
},
};
struct state state = {
.done_cnt = 0,
};
struct perf_thread_map *threads = NULL;
struct perf_cpu_map *cpus = NULL;
struct evlist *evlist = NULL;
struct evsel *evsel = NULL;
int err = -1, ret;
pid_t pid;
struct map *map;
bool have_vmlinux, have_kcore, excl_kernel = false;
struct dso *dso;
pid = getpid();
machine = machine__new_host();
machine->env = &perf_env;
ret = machine__create_kernel_maps(machine);
if (ret < 0) {
pr_debug("machine__create_kernel_maps failed\n");
goto out_err;
}
/* Force the use of kallsyms instead of vmlinux to try kcore */
if (try_kcore)
symbol_conf.kallsyms_name = "/proc/kallsyms";
/* Load kernel map */
map = machine__kernel_map(machine);
ret = map__load(map);
if (ret < 0) {
pr_debug("map__load failed\n");
goto out_err;
}
dso = map__dso(map);
have_vmlinux = dso__is_vmlinux(dso);
have_kcore = dso__is_kcore(dso);
/* 2nd time through we just try kcore */
if (try_kcore && !have_kcore)
return TEST_CODE_READING_NO_KCORE;
/* No point getting kernel events if there is no kernel object */
if (!have_vmlinux && !have_kcore)
excl_kernel = true;
threads = thread_map__new_by_tid(pid);
if (!threads) {
pr_debug("thread_map__new_by_tid failed\n");
goto out_err;
}
ret = perf_event__synthesize_thread_map(NULL, threads,
perf_event__process, machine,
true, false);
if (ret < 0) {
pr_debug("perf_event__synthesize_thread_map failed\n");
goto out_err;
}
thread = machine__findnew_thread(machine, pid, pid);
if (!thread) {
pr_debug("machine__findnew_thread failed\n");
goto out_put;
}
cpus = perf_cpu_map__new(NULL);
if (!cpus) {
pr_debug("perf_cpu_map__new failed\n");
goto out_put;
}
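	/*
	 * Open the events, retrying with a user-space-only event if the first
	 * attempt fails, e.g. when kernel samples are not permitted.
	 */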
while (1) {
const char *str;
evlist = evlist__new();
if (!evlist) {
pr_debug("evlist__new failed\n");
goto out_put;
}
perf_evlist__set_maps(&evlist->core, cpus, threads);
str = do_determine_event(excl_kernel);
pr_debug("Parsing event '%s'\n", str);
ret = parse_event(evlist, str);
if (ret < 0) {
pr_debug("parse_events failed\n");
goto out_put;
}
evlist__config(evlist, &opts, NULL);
evsel = evlist__first(evlist);
evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;
evsel->core.attr.enable_on_exec = 0;
ret = evlist__open(evlist);
if (ret < 0) {
if (!excl_kernel) {
excl_kernel = true;
/*
				 * Both cpus and threads are now owned by evlist
				 * and will be freed by the following perf_evlist__set_maps
				 * call. Get a reference to keep them alive.
*/
perf_cpu_map__get(cpus);
perf_thread_map__get(threads);
perf_evlist__set_maps(&evlist->core, NULL, NULL);
evlist__delete(evlist);
evlist = NULL;
continue;
}
if (verbose > 0) {
char errbuf[512];
evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
}
goto out_put;
}
break;
}
ret = evlist__mmap(evlist, UINT_MAX);
if (ret < 0) {
pr_debug("evlist__mmap failed\n");
goto out_put;
}
evlist__enable(evlist);
do_something();
evlist__disable(evlist);
ret = process_events(machine, evlist, &state);
if (ret < 0)
goto out_put;
if (!have_vmlinux && !have_kcore && !try_kcore)
err = TEST_CODE_READING_NO_KERNEL_OBJ;
else if (!have_vmlinux && !try_kcore)
err = TEST_CODE_READING_NO_VMLINUX;
else if (excl_kernel)
err = TEST_CODE_READING_NO_ACCESS;
else
err = TEST_CODE_READING_OK;
out_put:
thread__put(thread);
out_err:
evlist__delete(evlist);
perf_cpu_map__put(cpus);
perf_thread_map__put(threads);
machine__delete(machine);
return err;
}
static int test__code_reading(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret;
ret = do_test_code_reading(false);
if (!ret)
ret = do_test_code_reading(true);
switch (ret) {
case TEST_CODE_READING_OK:
return 0;
case TEST_CODE_READING_NO_VMLINUX:
pr_debug("no vmlinux\n");
return 0;
case TEST_CODE_READING_NO_KCORE:
pr_debug("no kcore\n");
return 0;
case TEST_CODE_READING_NO_ACCESS:
pr_debug("no access\n");
return 0;
case TEST_CODE_READING_NO_KERNEL_OBJ:
pr_debug("no kernel obj\n");
return 0;
default:
return -1;
};
}
DEFINE_SUITE("Object code reading", code_reading);
| linux-master | tools/perf/tests/code-reading.c |
// SPDX-License-Identifier: GPL-2.0
#include <api/fd/array.h>
#include <poll.h>
#include "util/debug.h"
#include "tests/tests.h"
static void fdarray__init_revents(struct fdarray *fda, short revents)
{
int fd;
fda->nr = fda->nr_alloc;
for (fd = 0; fd < fda->nr; ++fd) {
fda->entries[fd].fd = fda->nr - fd;
fda->entries[fd].events = revents;
fda->entries[fd].revents = revents;
}
}
static int fdarray__fprintf_prefix(struct fdarray *fda, const char *prefix, FILE *fp)
{
int printed = 0;
if (verbose <= 0)
return 0;
printed += fprintf(fp, "\n%s: ", prefix);
return printed + fdarray__fprintf(fda, fp);
}
static int test__fdarray__filter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int nr_fds, err = TEST_FAIL;
struct fdarray *fda = fdarray__new(5, 5);
if (fda == NULL) {
pr_debug("\nfdarray__new() failed!");
goto out;
}
fdarray__init_revents(fda, POLLIN);
nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
if (nr_fds != fda->nr_alloc) {
pr_debug("\nfdarray__filter()=%d != %d shouldn't have filtered anything",
nr_fds, fda->nr_alloc);
goto out_delete;
}
fdarray__init_revents(fda, POLLHUP);
nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
if (nr_fds != 0) {
pr_debug("\nfdarray__filter()=%d != %d, should have filtered all fds",
nr_fds, fda->nr_alloc);
goto out_delete;
}
fdarray__init_revents(fda, POLLHUP);
fda->entries[2].revents = POLLIN;
pr_debug("\nfiltering all but fda->entries[2]:");
fdarray__fprintf_prefix(fda, "before", stderr);
nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
fdarray__fprintf_prefix(fda, " after", stderr);
if (nr_fds != 1) {
pr_debug("\nfdarray__filter()=%d != 1, should have left just one event", nr_fds);
goto out_delete;
}
fdarray__init_revents(fda, POLLHUP);
fda->entries[0].revents = POLLIN;
fda->entries[3].revents = POLLIN;
pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");
fdarray__fprintf_prefix(fda, "before", stderr);
nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
fdarray__fprintf_prefix(fda, " after", stderr);
if (nr_fds != 2) {
pr_debug("\nfdarray__filter()=%d != 2, should have left just two events",
nr_fds);
goto out_delete;
}
pr_debug("\n");
err = 0;
out_delete:
fdarray__delete(fda);
out:
return err;
}
static int test__fdarray__add(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct fdarray *fda = fdarray__new(2, 2);
if (fda == NULL) {
pr_debug("\nfdarray__new() failed!");
goto out;
}
#define FDA_CHECK(_idx, _fd, _revents) \
if (fda->entries[_idx].fd != _fd) { \
pr_debug("\n%d: fda->entries[%d](%d) != %d!", \
__LINE__, _idx, fda->entries[1].fd, _fd); \
goto out_delete; \
} \
if (fda->entries[_idx].events != (_revents)) { \
pr_debug("\n%d: fda->entries[%d].revents(%d) != %d!", \
__LINE__, _idx, fda->entries[_idx].fd, _revents); \
goto out_delete; \
}
#define FDA_ADD(_idx, _fd, _revents, _nr) \
if (fdarray__add(fda, _fd, _revents, fdarray_flag__default) < 0) { \
pr_debug("\n%d: fdarray__add(fda, %d, %d) failed!", \
__LINE__,_fd, _revents); \
goto out_delete; \
} \
if (fda->nr != _nr) { \
pr_debug("\n%d: fdarray__add(fda, %d, %d)=%d != %d", \
__LINE__,_fd, _revents, fda->nr, _nr); \
goto out_delete; \
} \
FDA_CHECK(_idx, _fd, _revents)
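	/*
	 * The fdarray was created with room for 2 entries, so the third
	 * FDA_ADD below forces it to autogrow.
	 */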
FDA_ADD(0, 1, POLLIN, 1);
FDA_ADD(1, 2, POLLERR, 2);
fdarray__fprintf_prefix(fda, "before growing array", stderr);
FDA_ADD(2, 35, POLLHUP, 3);
if (fda->entries == NULL) {
		pr_debug("\nfdarray__add(fda, 35, POLLHUP) should have allocated fda->entries!");
goto out_delete;
}
fdarray__fprintf_prefix(fda, "after 3rd add", stderr);
FDA_ADD(3, 88, POLLIN | POLLOUT, 4);
fdarray__fprintf_prefix(fda, "after 4th add", stderr);
FDA_CHECK(0, 1, POLLIN);
FDA_CHECK(1, 2, POLLERR);
FDA_CHECK(2, 35, POLLHUP);
FDA_CHECK(3, 88, POLLIN | POLLOUT);
#undef FDA_ADD
#undef FDA_CHECK
pr_debug("\n");
err = 0;
out_delete:
fdarray__delete(fda);
out:
return err;
}
DEFINE_SUITE("Filter fds with revents mask in a fdarray", fdarray__filter);
DEFINE_SUITE("Add fd to a fdarray, making it autogrow", fdarray__add);
| linux-master | tools/perf/tests/fdarray.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test support for libpfm4 event encodings.
*
* Copyright 2020 Google LLC.
*/
#include "tests.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/pfm.h"
#include <linux/kernel.h>
#ifdef HAVE_LIBPFM
static int count_pfm_events(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
int count = 0;
perf_evlist__for_each_entry(evlist, evsel) {
count++;
}
return count;
}
static int test__pfm_events(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
struct evlist *evlist;
struct option opt;
size_t i;
const struct {
const char *events;
int nr_events;
} table[] = {
{
.events = "",
.nr_events = 0,
},
{
.events = "instructions",
.nr_events = 1,
},
{
.events = "instructions,cycles",
.nr_events = 2,
},
{
.events = "stereolab",
.nr_events = 0,
},
{
.events = "instructions,instructions",
.nr_events = 2,
},
{
.events = "stereolab,instructions",
.nr_events = 0,
},
{
.events = "instructions,stereolab",
.nr_events = 1,
},
};
for (i = 0; i < ARRAY_SIZE(table); i++) {
evlist = evlist__new();
if (evlist == NULL)
return -ENOMEM;
opt.value = evlist;
parse_libpfm_events_option(&opt,
table[i].events,
0);
TEST_ASSERT_EQUAL(table[i].events,
count_pfm_events(&evlist->core),
table[i].nr_events);
TEST_ASSERT_EQUAL(table[i].events,
evlist__nr_groups(evlist),
0);
evlist__delete(evlist);
}
return 0;
}
static int test__pfm_group(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
struct evlist *evlist;
struct option opt;
size_t i;
const struct {
const char *events;
int nr_events;
int nr_groups;
} table[] = {
{
.events = "{},",
.nr_events = 0,
.nr_groups = 0,
},
{
.events = "{instructions}",
.nr_events = 1,
.nr_groups = 0,
},
{
.events = "{instructions},{}",
.nr_events = 1,
.nr_groups = 0,
},
{
.events = "{},{instructions}",
.nr_events = 1,
.nr_groups = 0,
},
{
.events = "{instructions},{instructions}",
.nr_events = 2,
.nr_groups = 0,
},
{
.events = "{instructions,cycles},{instructions,cycles}",
.nr_events = 4,
.nr_groups = 2,
},
{
.events = "{stereolab}",
.nr_events = 0,
.nr_groups = 0,
},
{
.events =
"{instructions,cycles},{instructions,stereolab}",
.nr_events = 3,
.nr_groups = 1,
},
{
.events = "instructions}",
.nr_events = 1,
.nr_groups = 0,
},
{
.events = "{{instructions}}",
.nr_events = 0,
.nr_groups = 0,
},
};
for (i = 0; i < ARRAY_SIZE(table); i++) {
evlist = evlist__new();
if (evlist == NULL)
return -ENOMEM;
opt.value = evlist;
parse_libpfm_events_option(&opt,
table[i].events,
0);
TEST_ASSERT_EQUAL(table[i].events,
count_pfm_events(&evlist->core),
table[i].nr_events);
TEST_ASSERT_EQUAL(table[i].events,
evlist__nr_groups(evlist),
table[i].nr_groups);
evlist__delete(evlist);
}
return 0;
}
#else
static int test__pfm_events(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
return TEST_SKIP;
}
static int test__pfm_group(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
return TEST_SKIP;
}
#endif
static struct test_case pfm_tests[] = {
TEST_CASE_REASON("test of individual --pfm-events", pfm_events, "not compiled in"),
TEST_CASE_REASON("test groups of --pfm-events", pfm_group, "not compiled in"),
{ .name = NULL, }
};
struct test_suite suite__pfm = {
.desc = "Test libpfm4 support",
.test_cases = pfm_tests,
};
| linux-master | tools/perf/tests/pfm.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/types.h>
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>
#include "debug.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "record.h"
#include "tsc.h"
#include "mmap.h"
#include "tests.h"
#include "util/sample.h"
/*
 * Only x86_64/i386 and Arm64 support TSC in perf, so just enable the test
 * for those architectures.
*/
#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
#define TSC_IS_SUPPORTED 1
#else
#define TSC_IS_SUPPORTED 0
#endif
#define CHECK__(x) { \
while ((x) < 0) { \
pr_debug(#x " failed!\n"); \
goto out_err; \
} \
}
#define CHECK_NOT_NULL__(x) { \
while ((x) == NULL) { \
pr_debug(#x " failed!\n"); \
goto out_err; \
} \
}
static int test__tsc_is_supported(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
if (!TSC_IS_SUPPORTED) {
pr_debug("Test not supported on this architecture\n");
return TEST_SKIP;
}
return TEST_OK;
}
/**
* test__perf_time_to_tsc - test converting perf time to TSC.
*
* This function implements a test that checks that the conversion of perf time
* to and from TSC is consistent with the order of events. If the test passes
* %0 is returned, otherwise %-1 is returned. If TSC conversion is not
* supported then the test passes but " (not supported)" is printed.
*/
static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct record_opts opts = {
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.target = {
.uses_mmap = true,
},
.sample_time = true,
};
struct perf_thread_map *threads = NULL;
struct perf_cpu_map *cpus = NULL;
struct evlist *evlist = NULL;
struct evsel *evsel = NULL;
int err = TEST_FAIL, ret, i;
const char *comm1, *comm2;
struct perf_tsc_conversion tc;
struct perf_event_mmap_page *pc;
union perf_event *event;
u64 test_tsc, comm1_tsc, comm2_tsc;
u64 test_time, comm1_time = 0, comm2_time = 0;
struct mmap *md;
threads = thread_map__new(-1, getpid(), UINT_MAX);
CHECK_NOT_NULL__(threads);
cpus = perf_cpu_map__new(NULL);
CHECK_NOT_NULL__(cpus);
evlist = evlist__new();
CHECK_NOT_NULL__(evlist);
perf_evlist__set_maps(&evlist->core, cpus, threads);
CHECK__(parse_event(evlist, "cycles:u"));
evlist__config(evlist, &opts, NULL);
/* For hybrid "cycles:u", it creates two events */
evlist__for_each_entry(evlist, evsel) {
evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;
evsel->core.attr.enable_on_exec = 0;
}
ret = evlist__open(evlist);
if (ret < 0) {
if (ret == -ENOENT)
err = TEST_SKIP;
else
pr_debug("evlist__open() failed\n");
goto out_err;
}
CHECK__(evlist__mmap(evlist, UINT_MAX));
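	/*
	 * The base of the first mmap is the perf_event_mmap_page, from which
	 * perf_read_tsc_conversion() reads the TSC conversion parameters
	 * (time_shift, time_mult, time_zero).
	 */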
pc = evlist->mmap[0].core.base;
ret = perf_read_tsc_conversion(pc, &tc);
if (ret) {
if (ret == -EOPNOTSUPP) {
pr_debug("perf_read_tsc_conversion is not supported in current kernel\n");
err = TEST_SKIP;
}
goto out_err;
}
evlist__enable(evlist);
comm1 = "Test COMM 1";
CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));
test_tsc = rdtsc();
comm2 = "Test COMM 2";
CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));
evlist__disable(evlist);
for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(&md->core) < 0)
continue;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
struct perf_sample sample;
if (event->header.type != PERF_RECORD_COMM ||
(pid_t)event->comm.pid != getpid() ||
(pid_t)event->comm.tid != getpid())
goto next_event;
if (strcmp(event->comm.comm, comm1) == 0) {
CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
CHECK__(evsel__parse_sample(evsel, event, &sample));
comm1_time = sample.time;
}
if (strcmp(event->comm.comm, comm2) == 0) {
CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
CHECK__(evsel__parse_sample(evsel, event, &sample));
comm2_time = sample.time;
}
next_event:
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(&md->core);
}
if (!comm1_time || !comm2_time)
goto out_err;
test_time = tsc_to_perf_time(test_tsc, &tc);
comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
comm2_tsc = perf_time_to_tsc(comm2_time, &tc);
pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
comm1_time, comm1_tsc);
pr_debug("rdtsc time %"PRIu64" tsc %"PRIu64"\n",
test_time, test_tsc);
pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
comm2_time, comm2_tsc);
if (test_time <= comm1_time ||
test_time >= comm2_time)
goto out_err;
if (test_tsc <= comm1_tsc ||
test_tsc >= comm2_tsc)
goto out_err;
err = TEST_OK;
out_err:
evlist__delete(evlist);
perf_cpu_map__put(cpus);
perf_thread_map__put(threads);
return err;
}
static struct test_case time_to_tsc_tests[] = {
TEST_CASE_REASON("TSC support", tsc_is_supported,
"This architecture does not support"),
TEST_CASE_REASON("Perf time to TSC", perf_time_to_tsc,
"perf_read_tsc_conversion is not supported"),
{ .name = NULL, }
};
struct test_suite suite__perf_time_to_tsc = {
.desc = "Convert perf time to TSC",
.test_cases = time_to_tsc_tests,
};
| linux-master | tools/perf/tests/perf-time-to-tsc.c |
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <stdlib.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <api/fs/fs.h>
#include "dso.h"
#include "machine.h"
#include "symbol.h"
#include "tests.h"
#include "debug.h"
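/*
 * Create a temporary file of the given size filled with the repeating byte
 * pattern 0..9 that the offset/data checks below expect.
 */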
static char *test_file(int size)
{
#define TEMPL "/tmp/perf-test-XXXXXX"
static char buf_templ[sizeof(TEMPL)];
char *templ = buf_templ;
int fd, i;
unsigned char *buf;
strcpy(buf_templ, TEMPL);
#undef TEMPL
fd = mkstemp(templ);
if (fd < 0) {
perror("mkstemp failed");
return NULL;
}
buf = malloc(size);
if (!buf) {
close(fd);
return NULL;
}
for (i = 0; i < size; i++)
buf[i] = (unsigned char) ((int) i % 10);
if (size != write(fd, buf, size))
templ = NULL;
free(buf);
close(fd);
return templ;
}
#define TEST_FILE_SIZE (DSO__DATA_CACHE_SIZE * 20)
struct test_data_offset {
off_t offset;
u8 data[10];
int size;
};
struct test_data_offset offsets[] = {
/* Fill first cache page. */
{
.offset = 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Read first cache page. */
{
.offset = 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Fill cache boundary pages. */
{
.offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Read cache boundary pages. */
{
.offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Fill final cache page. */
{
.offset = TEST_FILE_SIZE - 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Read final cache page. */
{
.offset = TEST_FILE_SIZE - 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Read final cache page. */
{
.offset = TEST_FILE_SIZE - 3,
.data = { 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 },
.size = 3,
},
};
/* moved from util/dso.c for compatibility */
static int dso__data_fd(struct dso *dso, struct machine *machine)
{
int fd = dso__data_get_fd(dso, machine);
if (fd >= 0)
dso__data_put_fd(dso);
return fd;
}
static int test__dso_data(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct machine machine;
struct dso *dso;
char *file = test_file(TEST_FILE_SIZE);
size_t i;
TEST_ASSERT_VAL("No test file", file);
memset(&machine, 0, sizeof(machine));
dso = dso__new((const char *)file);
TEST_ASSERT_VAL("Failed to access to dso",
dso__data_fd(dso, &machine) >= 0);
/* Basic 10 bytes tests. */
for (i = 0; i < ARRAY_SIZE(offsets); i++) {
struct test_data_offset *data = &offsets[i];
ssize_t size;
u8 buf[10];
memset(buf, 0, 10);
size = dso__data_read_offset(dso, &machine, data->offset,
buf, 10);
TEST_ASSERT_VAL("Wrong size", size == data->size);
TEST_ASSERT_VAL("Wrong data", !memcmp(buf, data->data, 10));
}
/* Read cross multiple cache pages. */
{
ssize_t size;
int c;
u8 *buf;
buf = malloc(TEST_FILE_SIZE);
TEST_ASSERT_VAL("ENOMEM\n", buf);
/* First iteration to fill caches, second one to read them. */
for (c = 0; c < 2; c++) {
memset(buf, 0, TEST_FILE_SIZE);
size = dso__data_read_offset(dso, &machine, 10,
buf, TEST_FILE_SIZE);
TEST_ASSERT_VAL("Wrong size",
size == (TEST_FILE_SIZE - 10));
for (i = 0; i < (size_t)size; i++)
TEST_ASSERT_VAL("Wrong data",
buf[i] == (i % 10));
}
free(buf);
}
dso__put(dso);
unlink(file);
return 0;
}
static long open_files_cnt(void)
{
char path[PATH_MAX];
struct dirent *dent;
DIR *dir;
long nr = 0;
scnprintf(path, PATH_MAX, "%s/self/fd", procfs__mountpoint());
pr_debug("fd path: %s\n", path);
dir = opendir(path);
TEST_ASSERT_VAL("failed to open fd directory", dir);
while ((dent = readdir(dir)) != NULL) {
if (!strcmp(dent->d_name, ".") ||
!strcmp(dent->d_name, ".."))
continue;
nr++;
}
closedir(dir);
return nr - 1;
}
static struct dso **dsos;
static int dsos__create(int cnt, int size)
{
int i;
dsos = malloc(sizeof(*dsos) * cnt);
TEST_ASSERT_VAL("failed to alloc dsos array", dsos);
for (i = 0; i < cnt; i++) {
char *file;
file = test_file(size);
TEST_ASSERT_VAL("failed to get dso file", file);
dsos[i] = dso__new(file);
TEST_ASSERT_VAL("failed to get dso", dsos[i]);
}
return 0;
}
static void dsos__delete(int cnt)
{
int i;
for (i = 0; i < cnt; i++) {
struct dso *dso = dsos[i];
unlink(dso->name);
dso__put(dso);
}
free(dsos);
}
static int set_fd_limit(int n)
{
struct rlimit rlim;
if (getrlimit(RLIMIT_NOFILE, &rlim))
return -1;
pr_debug("file limit %ld, new %d\n", (long) rlim.rlim_cur, n);
rlim.rlim_cur = n;
return setrlimit(RLIMIT_NOFILE, &rlim);
}
static int test__dso_data_cache(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct machine machine;
long nr_end, nr = open_files_cnt();
int dso_cnt, limit, i, fd;
	/* Reset the internal dso open counter limit. */
reset_fd_limit();
memset(&machine, 0, sizeof(machine));
/* set as system limit */
limit = nr * 4;
TEST_ASSERT_VAL("failed to set file limit", !set_fd_limit(limit));
/* and this is now our dso open FDs limit */
dso_cnt = limit / 2;
TEST_ASSERT_VAL("failed to create dsos\n",
!dsos__create(dso_cnt, TEST_FILE_SIZE));
for (i = 0; i < (dso_cnt - 1); i++) {
struct dso *dso = dsos[i];
/*
		 * Open dsos via dso__data_fd(), which opens the data
		 * file and keeps it open (unless the open file limit is hit).
*/
fd = dso__data_fd(dso, &machine);
TEST_ASSERT_VAL("failed to get fd", fd > 0);
if (i % 2) {
#define BUFSIZE 10
u8 buf[BUFSIZE];
ssize_t n;
n = dso__data_read_offset(dso, &machine, 0, buf, BUFSIZE);
TEST_ASSERT_VAL("failed to read dso", n == BUFSIZE);
}
}
/* verify the first one is already open */
TEST_ASSERT_VAL("dsos[0] is not open", dsos[0]->data.fd != -1);
/* open +1 dso to reach the allowed limit */
fd = dso__data_fd(dsos[i], &machine);
TEST_ASSERT_VAL("failed to get fd", fd > 0);
/* should force the first one to be closed */
TEST_ASSERT_VAL("failed to close dsos[0]", dsos[0]->data.fd == -1);
/* cleanup everything */
dsos__delete(dso_cnt);
/* Make sure we did not leak any file descriptor. */
nr_end = open_files_cnt();
pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
TEST_ASSERT_VAL("failed leaking files", nr == nr_end);
return 0;
}
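/*
 * Return the RLIMIT_NOFILE value that leaves room for exactly 'count' more
 * open file descriptors beyond what the process currently has open.
 */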
static long new_limit(int count)
{
int fd = open("/dev/null", O_RDONLY);
long ret = fd;
if (count > 0)
ret = new_limit(--count);
close(fd);
return ret;
}
static int test__dso_data_reopen(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct machine machine;
long nr_end, nr = open_files_cnt(), lim = new_limit(3);
int fd, fd_extra;
#define dso_0 (dsos[0])
#define dso_1 (dsos[1])
#define dso_2 (dsos[2])
	/* Reset the internal dso open counter limit. */
reset_fd_limit();
memset(&machine, 0, sizeof(machine));
/*
* Test scenario:
* - create 3 dso objects
* - set process file descriptor limit to current
* files count + 3
* - test that the first dso gets closed when we
* reach the files count limit
*/
/* Make sure we are able to open 3 fds anyway */
TEST_ASSERT_VAL("failed to set file limit",
!set_fd_limit((lim)));
TEST_ASSERT_VAL("failed to create dsos\n", !dsos__create(3, TEST_FILE_SIZE));
/* open dso_0 */
fd = dso__data_fd(dso_0, &machine);
TEST_ASSERT_VAL("failed to get fd", fd > 0);
/* open dso_1 */
fd = dso__data_fd(dso_1, &machine);
TEST_ASSERT_VAL("failed to get fd", fd > 0);
/*
	 * open an extra file descriptor so that we just
	 * reach the open files limit
*/
fd_extra = open("/dev/null", O_RDONLY);
TEST_ASSERT_VAL("failed to open extra fd", fd_extra > 0);
/* open dso_2 */
fd = dso__data_fd(dso_2, &machine);
TEST_ASSERT_VAL("failed to get fd", fd > 0);
/*
* dso_0 should get closed, because we reached
* the file descriptor limit
*/
TEST_ASSERT_VAL("failed to close dso_0", dso_0->data.fd == -1);
/* open dso_0 */
fd = dso__data_fd(dso_0, &machine);
TEST_ASSERT_VAL("failed to get fd", fd > 0);
/*
* dso_1 should get closed, because we reached
* the file descriptor limit
*/
TEST_ASSERT_VAL("failed to close dso_1", dso_1->data.fd == -1);
/* cleanup everything */
close(fd_extra);
dsos__delete(3);
/* Make sure we did not leak any file descriptor. */
nr_end = open_files_cnt();
pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
TEST_ASSERT_VAL("failed leaking files", nr == nr_end);
return 0;
}
DEFINE_SUITE("DSO data read", dso_data);
DEFINE_SUITE("DSO data cache", dso_data_cache);
DEFINE_SUITE("DSO data reopen", dso_data_reopen);
| linux-master | tools/perf/tests/dso-data.c |
// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/parse-events.h"
#include "util/thread.h"
#include "tests/tests.h"
#include "tests/hists_common.h"
#include <linux/kernel.h>
struct sample {
u32 pid;
u64 ip;
struct thread *thread;
struct map *map;
struct symbol *sym;
};
/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
/* perf [kernel] schedule() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
/* perf [perf] main() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
/* perf [perf] cmd_record() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
/* perf [libc] malloc() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
/* perf [libc] free() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
/* perf [perf] main() */
{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
/* perf [kernel] page_fault() */
{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
/* bash [bash] main() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
/* bash [bash] xmalloc() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
/* bash [kernel] page_fault() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};
/*
 * Will be cast to struct ip_callchain, in which nr and the ips[] entries
 * are all 64-bit values.
*/
static u64 fake_callchains[][10] = {
/* schedule => run_command => main */
{ 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
/* main */
{ 1, FAKE_IP_PERF_MAIN, },
/* cmd_record => run_command => main */
{ 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
/* malloc => cmd_record => run_command => main */
{ 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
FAKE_IP_PERF_MAIN, },
/* free => cmd_record => run_command => main */
{ 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
FAKE_IP_PERF_MAIN, },
/* main */
{ 1, FAKE_IP_PERF_MAIN, },
/* page_fault => sys_perf_event_open => run_command => main */
{ 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
/* main */
{ 1, FAKE_IP_BASH_MAIN, },
/* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
{ 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
/* page_fault => malloc => main */
{ 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
};
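/*
 * Feed each fake sample and its callchain through machine__resolve() and
 * hist_entry_iter__add(), using the cumulative iterator when children mode
 * is enabled so that each caller in the chain also accumulates the period.
 */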
static int add_hist_entries(struct hists *hists, struct machine *machine)
{
struct addr_location al;
struct evsel *evsel = hists_to_evsel(hists);
struct perf_sample sample = { .period = 1000, };
size_t i;
addr_location__init(&al);
for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = &sample,
.hide_unresolved = false,
};
if (symbol_conf.cumulate_callchain)
iter.ops = &hist_iter_cumulative;
else
iter.ops = &hist_iter_normal;
sample.cpumode = PERF_RECORD_MISC_USER;
sample.pid = fake_samples[i].pid;
sample.tid = fake_samples[i].pid;
sample.ip = fake_samples[i].ip;
sample.callchain = (struct ip_callchain *)fake_callchains[i];
if (machine__resolve(machine, &al, &sample) < 0)
goto out;
if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
NULL) < 0) {
goto out;
}
thread__put(fake_samples[i].thread);
fake_samples[i].thread = thread__get(al.thread);
map__put(fake_samples[i].map);
fake_samples[i].map = map__get(al.map);
fake_samples[i].sym = al.sym;
}
addr_location__exit(&al);
return TEST_OK;
out:
pr_debug("Not enough memory for adding a hist entry\n");
addr_location__exit(&al);
return TEST_FAIL;
}
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
struct rb_root_cached *root_in;
struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
root_in = &hists->entries_collapsed;
else
root_in = hists->entries_in;
root_out = &hists->entries;
while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
rb_erase_cached(node, root_out);
rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
static void put_fake_samples(void)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
map__zput(fake_samples[i].map);
thread__zput(fake_samples[i].thread);
}
}
typedef int (*test_fn_t)(struct evsel *, struct machine *);
#define COMM(he) (thread__comm_str(he->thread))
#define DSO(he) (map__dso(he->ms.map)->short_name)
#define SYM(he) (he->ms.sym->name)
#define CPU(he) (he->cpu)
#define DEPTH(he) (he->callchain->max_depth)
#define CDSO(cl) (map__dso(cl->ms.map)->short_name)
#define CSYM(cl) (cl->ms.sym->name)
struct result {
u64 children;
u64 self;
const char *comm;
const char *dso;
const char *sym;
};
struct callchain_result {
u64 nr;
struct {
const char *dso;
const char *sym;
} node[10];
};
static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
struct callchain_result *expected_callchain, size_t nr_callchain)
{
char buf[32];
size_t i, c;
struct hist_entry *he;
struct rb_root *root;
struct rb_node *node;
struct callchain_node *cnode;
struct callchain_list *clist;
/*
* adding and deleting hist entries must be done outside of this
* function since TEST_ASSERT_VAL() returns in case of failure.
*/
hists__collapse_resort(hists, NULL);
evsel__output_resort(hists_to_evsel(hists), NULL);
if (verbose > 2) {
pr_info("use callchain: %d, cumulate callchain: %d\n",
symbol_conf.use_callchain,
symbol_conf.cumulate_callchain);
print_hists_out(hists);
}
root = &hists->entries.rb_root;
for (node = rb_first(root), i = 0;
node && (he = rb_entry(node, struct hist_entry, rb_node));
node = rb_next(node), i++) {
scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);
TEST_ASSERT_VAL("Incorrect number of hist entry",
i < nr_expected);
TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
!strcmp(COMM(he), expected[i].comm) &&
!strcmp(DSO(he), expected[i].dso) &&
!strcmp(SYM(he), expected[i].sym));
if (symbol_conf.cumulate_callchain)
TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);
if (!symbol_conf.use_callchain)
continue;
/* check callchain entries */
root = &he->callchain->node.rb_root;
TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);
c = 0;
list_for_each_entry(clist, &cnode->val, list) {
scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);
TEST_ASSERT_VAL("Incorrect number of callchain entry",
c < expected_callchain[i].nr);
TEST_ASSERT_VAL(buf,
!strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
!strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
c++;
}
/* TODO: handle multiple child nodes properly */
TEST_ASSERT_VAL("Incorrect number of callchain entry",
c <= expected_callchain[i].nr);
}
TEST_ASSERT_VAL("Incorrect number of hist entry",
i == nr_expected);
TEST_ASSERT_VAL("Incorrect number of callchain entry",
!symbol_conf.use_callchain || nr_expected == nr_callchain);
return 0;
}
/* NO callchain + NO children */
static int test1(struct evsel *evsel, struct machine *machine)
{
int err;
struct hists *hists = evsel__hists(evsel);
/*
* expected output:
*
* Overhead Command Shared Object Symbol
* ======== ======= ============= ==============
* 20.00% perf perf [.] main
* 10.00% bash [kernel] [k] page_fault
* 10.00% bash bash [.] main
* 10.00% bash bash [.] xmalloc
* 10.00% perf [kernel] [k] page_fault
* 10.00% perf [kernel] [k] schedule
* 10.00% perf libc [.] free
* 10.00% perf libc [.] malloc
* 10.00% perf perf [.] cmd_record
*/
struct result expected[] = {
{ 0, 2000, "perf", "perf", "main" },
{ 0, 1000, "bash", "[kernel]", "page_fault" },
{ 0, 1000, "bash", "bash", "main" },
{ 0, 1000, "bash", "bash", "xmalloc" },
{ 0, 1000, "perf", "[kernel]", "page_fault" },
{ 0, 1000, "perf", "[kernel]", "schedule" },
{ 0, 1000, "perf", "libc", "free" },
{ 0, 1000, "perf", "libc", "malloc" },
{ 0, 1000, "perf", "perf", "cmd_record" },
};
symbol_conf.use_callchain = false;
symbol_conf.cumulate_callchain = false;
evsel__reset_sample_bit(evsel, CALLCHAIN);
setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
if (err < 0)
goto out;
err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
out:
del_hist_entries(hists);
reset_output_field();
return err;
}
/* callchain + NO children */
static int test2(struct evsel *evsel, struct machine *machine)
{
int err;
struct hists *hists = evsel__hists(evsel);
/*
* expected output:
*
* Overhead Command Shared Object Symbol
* ======== ======= ============= ==============
* 20.00% perf perf [.] main
* |
* --- main
*
* 10.00% bash [kernel] [k] page_fault
* |
* --- page_fault
* malloc
* main
*
* 10.00% bash bash [.] main
* |
* --- main
*
* 10.00% bash bash [.] xmalloc
* |
* --- xmalloc
* malloc
* xmalloc <--- NOTE: there's a cycle
* malloc
* xmalloc
* main
*
* 10.00% perf [kernel] [k] page_fault
* |
* --- page_fault
* sys_perf_event_open
* run_command
* main
*
* 10.00% perf [kernel] [k] schedule
* |
* --- schedule
* run_command
* main
*
* 10.00% perf libc [.] free
* |
* --- free
* cmd_record
* run_command
* main
*
* 10.00% perf libc [.] malloc
* |
* --- malloc
* cmd_record
* run_command
* main
*
* 10.00% perf perf [.] cmd_record
* |
* --- cmd_record
* run_command
* main
*
*/
struct result expected[] = {
{ 0, 2000, "perf", "perf", "main" },
{ 0, 1000, "bash", "[kernel]", "page_fault" },
{ 0, 1000, "bash", "bash", "main" },
{ 0, 1000, "bash", "bash", "xmalloc" },
{ 0, 1000, "perf", "[kernel]", "page_fault" },
{ 0, 1000, "perf", "[kernel]", "schedule" },
{ 0, 1000, "perf", "libc", "free" },
{ 0, 1000, "perf", "libc", "malloc" },
{ 0, 1000, "perf", "perf", "cmd_record" },
};
struct callchain_result expected_callchain[] = {
{
1, { { "perf", "main" }, },
},
{
3, { { "[kernel]", "page_fault" },
{ "libc", "malloc" },
{ "bash", "main" }, },
},
{
1, { { "bash", "main" }, },
},
{
6, { { "bash", "xmalloc" },
{ "libc", "malloc" },
{ "bash", "xmalloc" },
{ "libc", "malloc" },
{ "bash", "xmalloc" },
{ "bash", "main" }, },
},
{
4, { { "[kernel]", "page_fault" },
{ "[kernel]", "sys_perf_event_open" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
{
3, { { "[kernel]", "schedule" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
{
4, { { "libc", "free" },
{ "perf", "cmd_record" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
{
4, { { "libc", "malloc" },
{ "perf", "cmd_record" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
{
3, { { "perf", "cmd_record" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
};
symbol_conf.use_callchain = true;
symbol_conf.cumulate_callchain = false;
evsel__set_sample_bit(evsel, CALLCHAIN);
setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
if (err < 0)
goto out;
err = do_test(hists, expected, ARRAY_SIZE(expected),
expected_callchain, ARRAY_SIZE(expected_callchain));
out:
del_hist_entries(hists);
reset_output_field();
return err;
}
/* NO callchain + children */
static int test3(struct evsel *evsel, struct machine *machine)
{
int err;
struct hists *hists = evsel__hists(evsel);
/*
* expected output:
*
* Children Self Command Shared Object Symbol
* ======== ======== ======= ============= =======================
* 70.00% 20.00% perf perf [.] main
* 50.00% 0.00% perf perf [.] run_command
* 30.00% 10.00% bash bash [.] main
* 30.00% 10.00% perf perf [.] cmd_record
* 20.00% 0.00% bash libc [.] malloc
* 10.00% 10.00% bash [kernel] [k] page_fault
* 10.00% 10.00% bash bash [.] xmalloc
* 10.00% 10.00% perf [kernel] [k] page_fault
* 10.00% 10.00% perf libc [.] malloc
* 10.00% 10.00% perf [kernel] [k] schedule
* 10.00% 10.00% perf libc [.] free
* 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
*/
struct result expected[] = {
{ 7000, 2000, "perf", "perf", "main" },
{ 5000, 0, "perf", "perf", "run_command" },
{ 3000, 1000, "bash", "bash", "main" },
{ 3000, 1000, "perf", "perf", "cmd_record" },
{ 2000, 0, "bash", "libc", "malloc" },
{ 1000, 1000, "bash", "[kernel]", "page_fault" },
{ 1000, 1000, "bash", "bash", "xmalloc" },
{ 1000, 1000, "perf", "[kernel]", "page_fault" },
{ 1000, 1000, "perf", "[kernel]", "schedule" },
{ 1000, 1000, "perf", "libc", "free" },
{ 1000, 1000, "perf", "libc", "malloc" },
{ 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
};
symbol_conf.use_callchain = false;
symbol_conf.cumulate_callchain = true;
evsel__reset_sample_bit(evsel, CALLCHAIN);
setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
if (err < 0)
goto out;
err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
out:
del_hist_entries(hists);
reset_output_field();
return err;
}
/* callchain + children */
static int test4(struct evsel *evsel, struct machine *machine)
{
int err;
struct hists *hists = evsel__hists(evsel);
/*
* expected output:
*
* Children Self Command Shared Object Symbol
* ======== ======== ======= ============= =======================
* 70.00% 20.00% perf perf [.] main
* |
* --- main
*
* 50.00% 0.00% perf perf [.] run_command
* |
* --- run_command
* main
*
* 30.00% 10.00% bash bash [.] main
* |
* --- main
*
* 30.00% 10.00% perf perf [.] cmd_record
* |
* --- cmd_record
* run_command
* main
*
* 20.00% 0.00% bash libc [.] malloc
* |
* --- malloc
* |
* |--50.00%-- xmalloc
* | main
* --50.00%-- main
*
* 10.00% 10.00% bash [kernel] [k] page_fault
* |
* --- page_fault
* malloc
* main
*
* 10.00% 10.00% bash bash [.] xmalloc
* |
* --- xmalloc
* malloc
* xmalloc <--- NOTE: there's a cycle
* malloc
* xmalloc
* main
*
* 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
* |
* --- sys_perf_event_open
* run_command
* main
*
* 10.00% 10.00% perf [kernel] [k] page_fault
* |
* --- page_fault
* sys_perf_event_open
* run_command
* main
*
* 10.00% 10.00% perf [kernel] [k] schedule
* |
* --- schedule
* run_command
* main
*
* 10.00% 10.00% perf libc [.] free
* |
* --- free
* cmd_record
* run_command
* main
*
* 10.00% 10.00% perf libc [.] malloc
* |
* --- malloc
* cmd_record
* run_command
* main
*
*/
struct result expected[] = {
{ 7000, 2000, "perf", "perf", "main" },
{ 5000, 0, "perf", "perf", "run_command" },
{ 3000, 1000, "bash", "bash", "main" },
{ 3000, 1000, "perf", "perf", "cmd_record" },
{ 2000, 0, "bash", "libc", "malloc" },
{ 1000, 1000, "bash", "[kernel]", "page_fault" },
{ 1000, 1000, "bash", "bash", "xmalloc" },
{ 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
{ 1000, 1000, "perf", "[kernel]", "page_fault" },
{ 1000, 1000, "perf", "[kernel]", "schedule" },
{ 1000, 1000, "perf", "libc", "free" },
{ 1000, 1000, "perf", "libc", "malloc" },
};
struct callchain_result expected_callchain[] = {
{
1, { { "perf", "main" }, },
},
{
2, { { "perf", "run_command" },
{ "perf", "main" }, },
},
{
1, { { "bash", "main" }, },
},
{
3, { { "perf", "cmd_record" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
{
4, { { "libc", "malloc" },
{ "bash", "xmalloc" },
{ "bash", "main" },
{ "bash", "main" }, },
},
{
3, { { "[kernel]", "page_fault" },
{ "libc", "malloc" },
{ "bash", "main" }, },
},
{
6, { { "bash", "xmalloc" },
{ "libc", "malloc" },
{ "bash", "xmalloc" },
{ "libc", "malloc" },
{ "bash", "xmalloc" },
{ "bash", "main" }, },
},
{
3, { { "[kernel]", "sys_perf_event_open" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
{
4, { { "[kernel]", "page_fault" },
{ "[kernel]", "sys_perf_event_open" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
{
3, { { "[kernel]", "schedule" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
{
4, { { "libc", "free" },
{ "perf", "cmd_record" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
{
4, { { "libc", "malloc" },
{ "perf", "cmd_record" },
{ "perf", "run_command" },
{ "perf", "main" }, },
},
};
symbol_conf.use_callchain = true;
symbol_conf.cumulate_callchain = true;
evsel__set_sample_bit(evsel, CALLCHAIN);
setup_sorting(NULL);
callchain_param = callchain_param_default;
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
if (err < 0)
goto out;
err = do_test(hists, expected, ARRAY_SIZE(expected),
expected_callchain, ARRAY_SIZE(expected_callchain));
out:
del_hist_entries(hists);
reset_output_field();
return err;
}
static int test__hists_cumulate(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct machines machines;
struct machine *machine;
struct evsel *evsel;
struct evlist *evlist = evlist__new();
size_t i;
test_fn_t testcases[] = {
test1,
test2,
test3,
test4,
};
TEST_ASSERT_VAL("No memory", evlist);
err = parse_event(evlist, "cpu-clock");
if (err)
goto out;
err = TEST_FAIL;
machines__init(&machines);
/* setup threads/dso/map/symbols also */
machine = setup_fake_machine(&machines);
if (!machine)
goto out;
if (verbose > 1)
machine__fprintf(machine, stderr);
evsel = evlist__first(evlist);
for (i = 0; i < ARRAY_SIZE(testcases); i++) {
err = testcases[i](evsel, machine);
if (err < 0)
break;
}
out:
/* tear down everything */
evlist__delete(evlist);
machines__exit(&machines);
put_fake_samples();
return err;
}
DEFINE_SUITE("Cumulate child hist entries", hists_cumulate);
| linux-master | tools/perf/tests/hists_cumulate.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/prctl.h>
#include "tests.h"
#include "thread_map.h"
#include "debug.h"
#include "event.h"
#include "util/synthetic-events.h"
#include <linux/zalloc.h>
#include <perf/event.h>
#include <internal/threadmap.h>
struct perf_sample;
struct perf_tool;
struct machine;
#define NAME (const char *) "perf"
#define NAMEUL (unsigned long) NAME
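/*
* prctl(PR_SET_NAME) takes its argument as an unsigned long, hence the
* NAMEUL alias for the same "perf" string used below.
*/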
static int test__thread_map(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_thread_map *map;
TEST_ASSERT_VAL("failed to set process name",
!prctl(PR_SET_NAME, NAMEUL, 0, 0, 0));
/* test map on current pid */
map = thread_map__new_by_pid(getpid());
TEST_ASSERT_VAL("failed to alloc map", map);
thread_map__read_comms(map);
TEST_ASSERT_VAL("wrong nr", map->nr == 1);
TEST_ASSERT_VAL("wrong pid",
perf_thread_map__pid(map, 0) == getpid());
TEST_ASSERT_VAL("wrong comm",
perf_thread_map__comm(map, 0) &&
!strcmp(perf_thread_map__comm(map, 0), NAME));
TEST_ASSERT_VAL("wrong refcnt",
refcount_read(&map->refcnt) == 1);
perf_thread_map__put(map);
/* test dummy pid */
map = perf_thread_map__new_dummy();
TEST_ASSERT_VAL("failed to alloc map", map);
thread_map__read_comms(map);
TEST_ASSERT_VAL("wrong nr", map->nr == 1);
TEST_ASSERT_VAL("wrong pid", perf_thread_map__pid(map, 0) == -1);
TEST_ASSERT_VAL("wrong comm",
perf_thread_map__comm(map, 0) &&
!strcmp(perf_thread_map__comm(map, 0), "dummy"));
TEST_ASSERT_VAL("wrong refcnt",
refcount_read(&map->refcnt) == 1);
perf_thread_map__put(map);
return 0;
}
static int process_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_record_thread_map *map = &event->thread_map;
struct perf_thread_map *threads;
TEST_ASSERT_VAL("wrong nr", map->nr == 1);
TEST_ASSERT_VAL("wrong pid", map->entries[0].pid == (u64) getpid());
TEST_ASSERT_VAL("wrong comm", !strcmp(map->entries[0].comm, NAME));
threads = thread_map__new_event(&event->thread_map);
TEST_ASSERT_VAL("failed to alloc map", threads);
TEST_ASSERT_VAL("wrong nr", threads->nr == 1);
TEST_ASSERT_VAL("wrong pid",
perf_thread_map__pid(threads, 0) == getpid());
TEST_ASSERT_VAL("wrong comm",
perf_thread_map__comm(threads, 0) &&
!strcmp(perf_thread_map__comm(threads, 0), NAME));
TEST_ASSERT_VAL("wrong refcnt",
refcount_read(&threads->refcnt) == 1);
perf_thread_map__put(threads);
return 0;
}
static int test__thread_map_synthesize(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_thread_map *threads;
TEST_ASSERT_VAL("failed to set process name",
!prctl(PR_SET_NAME, NAMEUL, 0, 0, 0));
/* test map on current pid */
threads = thread_map__new_by_pid(getpid());
TEST_ASSERT_VAL("failed to alloc map", threads);
thread_map__read_comms(threads);
TEST_ASSERT_VAL("failed to synthesize map",
!perf_event__synthesize_thread_map2(NULL, threads, process_event, NULL));
perf_thread_map__put(threads);
return 0;
}
static int test__thread_map_remove(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_thread_map *threads;
char *str;
TEST_ASSERT_VAL("failed to allocate map string",
asprintf(&str, "%d,%d", getpid(), getppid()) >= 0);
threads = thread_map__new_str(str, NULL, 0, false);
free(str);
TEST_ASSERT_VAL("failed to allocate thread_map",
threads);
if (verbose > 0)
thread_map__fprintf(threads, stderr);
TEST_ASSERT_VAL("failed to remove thread",
!thread_map__remove(threads, 0));
TEST_ASSERT_VAL("thread_map count != 1", threads->nr == 1);
if (verbose > 0)
thread_map__fprintf(threads, stderr);
TEST_ASSERT_VAL("failed to remove thread",
!thread_map__remove(threads, 0));
TEST_ASSERT_VAL("thread_map count != 0", threads->nr == 0);
if (verbose > 0)
thread_map__fprintf(threads, stderr);
TEST_ASSERT_VAL("failed to not remove thread",
thread_map__remove(threads, 0));
perf_thread_map__put(threads);
return 0;
}
DEFINE_SUITE("Thread map", thread_map);
DEFINE_SUITE("Synthesize thread map", thread_map_synthesize);
DEFINE_SUITE("Remove thread map", thread_map_remove);
| linux-master | tools/perf/tests/thread-map.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
* 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
*/
#define __SANE_USERSPACE_TYPES__
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <linux/hw_breakpoint.h>
#include "tests.h"
#include "debug.h"
#include "event.h"
#include "../perf-sys.h"
#include "cloexec.h"
/*
* PowerPC and S390 do not support creation of instruction breakpoints using the
* perf_event interface.
*
* Just disable the test for these architectures until these issues are
* resolved.
*/
#if defined(__powerpc__) || defined(__s390x__)
#define BP_ACCOUNT_IS_SUPPORTED 0
#else
#define BP_ACCOUNT_IS_SUPPORTED 1
#endif
static volatile long the_var;
static noinline int test_function(void)
{
return 0;
}
static int __event(bool is_x, void *addr, struct perf_event_attr *attr)
{
int fd;
memset(attr, 0, sizeof(struct perf_event_attr));
attr->type = PERF_TYPE_BREAKPOINT;
attr->size = sizeof(struct perf_event_attr);
attr->config = 0;
attr->bp_type = is_x ? HW_BREAKPOINT_X : HW_BREAKPOINT_W;
attr->bp_addr = (unsigned long) addr;
attr->bp_len = sizeof(long);
attr->sample_period = 1;
attr->sample_type = PERF_SAMPLE_IP;
attr->exclude_kernel = 1;
attr->exclude_hv = 1;
fd = sys_perf_event_open(attr, -1, 0, -1,
perf_event_open_cloexec_flag());
if (fd < 0) {
pr_debug("failed opening event %llx\n", attr->config);
return TEST_FAIL;
}
return fd;
}
static int wp_event(void *addr, struct perf_event_attr *attr)
{
return __event(false, addr, attr);
}
static int bp_event(void *addr, struct perf_event_attr *attr)
{
return __event(true, addr, attr);
}
static int bp_accounting(int wp_cnt, int share)
{
struct perf_event_attr attr, attr_mod, attr_new;
int i, fd[wp_cnt], fd_wp, ret;
for (i = 0; i < wp_cnt; i++) {
fd[i] = wp_event((void *)&the_var, &attr);
TEST_ASSERT_VAL("failed to create wp\n", fd[i] != -1);
pr_debug("wp %d created\n", i);
}
attr_mod = attr;
attr_mod.bp_type = HW_BREAKPOINT_X;
attr_mod.bp_addr = (unsigned long) test_function;
ret = ioctl(fd[0], PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr_mod);
TEST_ASSERT_VAL("failed to modify wp\n", ret == 0);
pr_debug("wp 0 modified to bp\n");
if (!share) {
fd_wp = wp_event((void *)&the_var, &attr_new);
TEST_ASSERT_VAL("failed to create max wp\n", fd_wp != -1);
pr_debug("wp max created\n");
}
for (i = 0; i < wp_cnt; i++)
close(fd[i]);
return 0;
}
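/*
* Count available slots by opening events of the given type until the kernel
* refuses; this typically matches the number of hardware debug registers
* (e.g. 4 on x86), capped at 100 as a sanity limit.
*/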
static int detect_cnt(bool is_x)
{
struct perf_event_attr attr;
void *addr = is_x ? (void *)test_function : (void *)&the_var;
int fd[100], cnt = 0, i;
while (1) {
if (cnt == 100) {
pr_debug("way too many debug registers, fix the test\n");
return 0;
}
fd[cnt] = __event(is_x, addr, &attr);
if (fd[cnt] < 0)
break;
cnt++;
}
for (i = 0; i < cnt; i++)
close(fd[i]);
return cnt;
}
static int detect_ioctl(void)
{
struct perf_event_attr attr;
int fd, ret = 1;
fd = wp_event((void *) &the_var, &attr);
if (fd > 0) {
ret = ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);
close(fd);
}
return ret ? 0 : 1;
}
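/*
* Returns 1 when watchpoints and breakpoints are accounted from the same
* slot pool (i.e. opening wp_cnt + bp_cnt events at once cannot all succeed),
* 0 when they are accounted separately, -1 on error.
*/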
static int detect_share(int wp_cnt, int bp_cnt)
{
struct perf_event_attr attr;
int i, *fd = NULL, ret = -1;
if (wp_cnt + bp_cnt == 0)
return 0;
fd = malloc(sizeof(int) * (wp_cnt + bp_cnt));
if (!fd)
return -1;
for (i = 0; i < wp_cnt; i++) {
fd[i] = wp_event((void *)&the_var, &attr);
if (fd[i] == -1) {
pr_err("failed to create wp\n");
goto out;
}
}
for (; i < (bp_cnt + wp_cnt); i++) {
fd[i] = bp_event((void *)test_function, &attr);
if (fd[i] == -1)
break;
}
ret = i != (bp_cnt + wp_cnt);
out:
while (i--)
close(fd[i]);
free(fd);
return ret;
}
/*
* This test does following:
* - detects the number of watch/break-points,
* skip test if any is missing
* - detects PERF_EVENT_IOC_MODIFY_ATTRIBUTES ioctl,
* skip test if it's missing
* - detects if watchpoints and breakpoints share
* same slots
* - create all possible watchpoints on cpu 0
* - change one of it to breakpoint
* - in case wp and bp do not share slots,
* we create another watchpoint to ensure
* the slot accounting is correct
*/
static int test__bp_accounting(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int has_ioctl = detect_ioctl();
int wp_cnt = detect_cnt(false);
int bp_cnt = detect_cnt(true);
int share = detect_share(wp_cnt, bp_cnt);
if (!BP_ACCOUNT_IS_SUPPORTED) {
pr_debug("Test not supported on this architecture");
return TEST_SKIP;
}
pr_debug("watchpoints count %d, breakpoints count %d, has_ioctl %d, share %d\n",
wp_cnt, bp_cnt, has_ioctl, share);
if (!wp_cnt || !bp_cnt || !has_ioctl)
return TEST_SKIP;
return bp_accounting(wp_cnt, share);
}
DEFINE_SUITE("Breakpoint accounting", bp_accounting);
| linux-master | tools/perf/tests/bp_account.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <traceevent/event-parse.h>
#include "evsel.h"
#include "tests.h"
#include "debug.h"
static int evsel__test_field(struct evsel *evsel, const char *name, int size, bool should_be_signed)
{
struct tep_format_field *field = evsel__field(evsel, name);
int is_signed;
int ret = 0;
if (field == NULL) {
pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
return -1;
}
is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED);
if (should_be_signed && !is_signed) {
pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
evsel->name, name, is_signed, should_be_signed);
ret = -1;
}
if (field->size != size) {
pr_debug("%s: \"%s\" size (%d) should be %d!\n",
evsel->name, name, field->size, size);
ret = -1;
}
return ret;
}
static int test__perf_evsel__tp_sched_test(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
struct evsel *evsel = evsel__newtp("sched", "sched_switch");
int ret = 0;
if (IS_ERR(evsel)) {
pr_debug("evsel__newtp failed with %ld\n", PTR_ERR(evsel));
return -1;
}
if (evsel__test_field(evsel, "prev_comm", 16, false))
ret = -1;
if (evsel__test_field(evsel, "prev_pid", 4, true))
ret = -1;
if (evsel__test_field(evsel, "prev_prio", 4, true))
ret = -1;
if (evsel__test_field(evsel, "prev_state", sizeof(long), true))
ret = -1;
if (evsel__test_field(evsel, "next_comm", 16, false))
ret = -1;
if (evsel__test_field(evsel, "next_pid", 4, true))
ret = -1;
if (evsel__test_field(evsel, "next_prio", 4, true))
ret = -1;
evsel__delete(evsel);
evsel = evsel__newtp("sched", "sched_wakeup");
if (IS_ERR(evsel)) {
pr_debug("evsel__newtp failed with %ld\n", PTR_ERR(evsel));
return -1;
}
if (evsel__test_field(evsel, "comm", 16, false))
ret = -1;
if (evsel__test_field(evsel, "pid", 4, true))
ret = -1;
if (evsel__test_field(evsel, "prio", 4, true))
ret = -1;
if (evsel__test_field(evsel, "target_cpu", 4, true))
ret = -1;
evsel__delete(evsel);
return ret;
}
DEFINE_SUITE("Parse sched tracepoints fields", perf_evsel__tp_sched_test);
| linux-master | tools/perf/tests/evsel-tp-sched.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/string.h>
#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/wait.h>
#include <perf/cpumap.h>
#include "tests.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "parse-events.h"
#include "thread_map.h"
#include "target.h"
static int attach__enable_on_exec(struct evlist *evlist)
{
struct evsel *evsel = evlist__last(evlist);
struct target target = {
.uid = UINT_MAX,
};
const char *argv[] = { "true", NULL, };
char sbuf[STRERR_BUFSIZE];
int err;
pr_debug("attaching to spawned child, enable on exec\n");
err = evlist__create_maps(evlist, &target);
if (err < 0) {
pr_debug("Not enough memory to create thread/cpu maps\n");
return err;
}
err = evlist__prepare_workload(evlist, &target, argv, false, NULL);
if (err < 0) {
pr_debug("Couldn't run the workload!\n");
return err;
}
evsel->core.attr.enable_on_exec = 1;
err = evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
return err;
}
return evlist__start_workload(evlist) == 1 ? TEST_OK : TEST_FAIL;
}
static int detach__enable_on_exec(struct evlist *evlist)
{
waitpid(evlist->workload.pid, NULL, 0);
return 0;
}
static int attach__current_disabled(struct evlist *evlist)
{
struct evsel *evsel = evlist__last(evlist);
struct perf_thread_map *threads;
int err;
pr_debug("attaching to current thread as disabled\n");
threads = thread_map__new(-1, getpid(), UINT_MAX);
if (threads == NULL) {
pr_debug("thread_map__new\n");
return -1;
}
evsel->core.attr.disabled = 1;
err = evsel__open_per_thread(evsel, threads);
if (err) {
pr_debug("Failed to open event cpu-clock:u\n");
return err;
}
perf_thread_map__put(threads);
return evsel__enable(evsel) == 0 ? TEST_OK : TEST_FAIL;
}
static int attach__current_enabled(struct evlist *evlist)
{
struct evsel *evsel = evlist__last(evlist);
struct perf_thread_map *threads;
int err;
pr_debug("attaching to current thread as enabled\n");
threads = thread_map__new(-1, getpid(), UINT_MAX);
if (threads == NULL) {
pr_debug("failed to call thread_map__new\n");
return -1;
}
err = evsel__open_per_thread(evsel, threads);
perf_thread_map__put(threads);
return err == 0 ? TEST_OK : TEST_FAIL;
}
static int detach__disable(struct evlist *evlist)
{
struct evsel *evsel = evlist__last(evlist);
return evsel__enable(evsel);
}
static int attach__cpu_disabled(struct evlist *evlist)
{
struct evsel *evsel = evlist__last(evlist);
struct perf_cpu_map *cpus;
int err;
pr_debug("attaching to CPU 0 as enabled\n");
cpus = perf_cpu_map__new("0");
if (cpus == NULL) {
pr_debug("failed to call perf_cpu_map__new\n");
return -1;
}
evsel->core.attr.disabled = 1;
err = evsel__open_per_cpu(evsel, cpus, -1);
if (err) {
if (err == -EACCES)
return TEST_SKIP;
pr_debug("Failed to open event cpu-clock:u\n");
return err;
}
perf_cpu_map__put(cpus);
return evsel__enable(evsel);
}
static int attach__cpu_enabled(struct evlist *evlist)
{
struct evsel *evsel = evlist__last(evlist);
struct perf_cpu_map *cpus;
int err;
pr_debug("attaching to CPU 0 as enabled\n");
cpus = perf_cpu_map__new("0");
if (cpus == NULL) {
pr_debug("failed to call perf_cpu_map__new\n");
return -1;
}
err = evsel__open_per_cpu(evsel, cpus, -1);
if (err == -EACCES)
return TEST_SKIP;
perf_cpu_map__put(cpus);
return err ? TEST_FAIL : TEST_OK;
}
static int test_times(int (attach)(struct evlist *),
int (detach)(struct evlist *))
{
struct perf_counts_values count;
struct evlist *evlist = NULL;
struct evsel *evsel;
int err = -1, i;
evlist = evlist__new();
if (!evlist) {
pr_debug("failed to create event list\n");
goto out_err;
}
err = parse_event(evlist, "cpu-clock:u");
if (err) {
pr_debug("failed to parse event cpu-clock:u\n");
goto out_err;
}
evsel = evlist__last(evlist);
evsel->core.attr.read_format |=
PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
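/*
* Sketch of the kernel's read() layout with just these two read_format bits
* set (no PERF_FORMAT_GROUP/ID):
*
*   u64 value;         -> count.val
*   u64 time_enabled;  -> count.ena
*   u64 time_running;  -> count.run
*
* The check below passes when time_enabled == time_running, i.e. the event
* stayed scheduled in for the whole time it was enabled.
*/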
err = attach(evlist);
if (err == TEST_SKIP) {
pr_debug(" SKIP : not enough rights\n");
return err;
}
TEST_ASSERT_VAL("failed to attach", !err);
for (i = 0; i < 100000000; i++) { }
TEST_ASSERT_VAL("failed to detach", !detach(evlist));
perf_evsel__read(&evsel->core, 0, 0, &count);
err = !(count.ena == count.run);
pr_debug(" %s: ena %" PRIu64", run %" PRIu64"\n",
!err ? "OK " : "FAILED",
count.ena, count.run);
out_err:
evlist__delete(evlist);
return !err ? TEST_OK : TEST_FAIL;
}
/*
* This test creates the software event 'cpu-clock',
* attaches it in several ways (explained below)
* and checks that the enabled and running times
* match.
*/
static int test__event_times(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err, ret = 0;
#define _T(attach, detach) \
err = test_times(attach, detach); \
if (err && (ret == TEST_OK || ret == TEST_SKIP)) \
ret = err;
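/*
* Keep the first hard failure: a later TEST_SKIP never overwrites it, while
* a failure does replace an earlier OK or SKIP.
*/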
/* attach on newly spawned process after exec */
_T(attach__enable_on_exec, detach__enable_on_exec)
/* attach on current process as enabled */
_T(attach__current_enabled, detach__disable)
/* attach on current process as disabled */
_T(attach__current_disabled, detach__disable)
/* attach on cpu as disabled */
_T(attach__cpu_disabled, detach__disable)
/* attach on cpu as enabled */
_T(attach__cpu_enabled, detach__disable)
#undef _T
return ret;
}
DEFINE_SUITE("Event times", event_times);
| linux-master | tools/perf/tests/event-times.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <linux/err.h>
#include <linux/string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "record.h"
#include "tests.h"
#include "debug.h"
#include "util/mmap.h"
#include <errno.h>
#include <perf/mmap.h>
#include "util/sample.h"
#ifndef O_DIRECTORY
#define O_DIRECTORY 00200000
#endif
#ifndef AT_FDCWD
#define AT_FDCWD -100
#endif
static int test__syscall_openat_tp_fields(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
struct record_opts opts = {
.target = {
.uid = UINT_MAX,
.uses_mmap = true,
},
.no_buffering = true,
.freq = 1,
.mmap_pages = 256,
.raw_samples = true,
};
const char *filename = "/etc/passwd";
int flags = O_RDONLY | O_DIRECTORY;
struct evlist *evlist = evlist__new();
struct evsel *evsel;
int err = -1, i, nr_events = 0, nr_polls = 0;
char sbuf[STRERR_BUFSIZE];
if (evlist == NULL) {
pr_debug("%s: evlist__new\n", __func__);
goto out;
}
evsel = evsel__newtp("syscalls", "sys_enter_openat");
if (IS_ERR(evsel)) {
pr_debug("%s: evsel__newtp\n", __func__);
goto out_delete_evlist;
}
evlist__add(evlist, evsel);
err = evlist__create_maps(evlist, &opts.target);
if (err < 0) {
pr_debug("%s: evlist__create_maps\n", __func__);
goto out_delete_evlist;
}
evsel__config(evsel, &opts, NULL);
perf_thread_map__set_pid(evlist->core.threads, 0, getpid());
err = evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
err = evlist__mmap(evlist, UINT_MAX);
if (err < 0) {
pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
evlist__enable(evlist);
/*
* Generate the event:
*/
openat(AT_FDCWD, filename, flags);
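/*
* Note: this openat() is expected to fail (O_DIRECTORY on a regular file),
* but the syscalls:sys_enter_openat tracepoint fires on syscall entry
* regardless, which is all the test needs to observe the 'flags' field.
*/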
while (1) {
int before = nr_events;
for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
struct mmap *md;
md = &evlist->mmap[i];
if (perf_mmap__read_init(&md->core) < 0)
continue;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
const u32 type = event->header.type;
int tp_flags;
struct perf_sample sample;
++nr_events;
if (type != PERF_RECORD_SAMPLE) {
perf_mmap__consume(&md->core);
continue;
}
err = evsel__parse_sample(evsel, event, &sample);
if (err) {
pr_debug("Can't parse sample, err = %d\n", err);
goto out_delete_evlist;
}
tp_flags = evsel__intval(evsel, &sample, "flags");
if (flags != tp_flags) {
pr_debug("%s: Expected flags=%#x, got %#x\n",
__func__, flags, tp_flags);
goto out_delete_evlist;
}
goto out_ok;
}
perf_mmap__read_done(&md->core);
}
if (nr_events == before)
evlist__poll(evlist, 10);
if (++nr_polls > 5) {
pr_debug("%s: no events!\n", __func__);
goto out_delete_evlist;
}
}
out_ok:
err = 0;
out_delete_evlist:
evlist__delete(evlist);
out:
return err;
}
DEFINE_SUITE("syscalls:sys_enter_openat event fields", syscall_openat_tp_fields);
| linux-master | tools/perf/tests/openat-syscall-tp-fields.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/mman.h>
#include <linux/string.h>
#include "tests.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/cpumap.h"
#include "util/mmap.h"
#include "util/sample.h"
#include "util/thread_map.h"
#include <perf/evlist.h>
#include <perf/mmap.h>
#define NR_LOOPS 10000000
/*
* This test will open software clock events (cpu-clock, task-clock)
* then check that their frequency -> period conversion carries no
* artifact of the period being forced to 1.
*/
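/*
* With attr.freq = 1 and sample_freq = 500 (set below) the kernel adjusts
* the period so that roughly 500 samples are taken per second; for the
* ns-based cpu-clock/task-clock counters the steady-state period is
* therefore about 1e9 / 500 = 2,000,000, never the constant 1 that a broken
* conversion would produce.
*/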
static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
{
int i, err = -1;
volatile int tmp = 0;
u64 total_periods = 0;
int nr_samples = 0;
char sbuf[STRERR_BUFSIZE];
union perf_event *event;
struct evsel *evsel;
struct evlist *evlist;
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = clock_id,
.sample_type = PERF_SAMPLE_PERIOD,
.exclude_kernel = 1,
.disabled = 1,
.freq = 1,
};
struct perf_cpu_map *cpus = NULL;
struct perf_thread_map *threads = NULL;
struct mmap *md;
attr.sample_freq = 500;
evlist = evlist__new();
if (evlist == NULL) {
pr_debug("evlist__new\n");
return -1;
}
evsel = evsel__new(&attr);
if (evsel == NULL) {
pr_debug("evsel__new\n");
goto out_delete_evlist;
}
evlist__add(evlist, evsel);
cpus = perf_cpu_map__dummy_new();
threads = thread_map__new_by_tid(getpid());
if (!cpus || !threads) {
err = -ENOMEM;
pr_debug("Not enough memory to create thread/cpu maps\n");
goto out_delete_evlist;
}
perf_evlist__set_maps(&evlist->core, cpus, threads);
if (evlist__open(evlist)) {
const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";
err = -errno;
pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
str_error_r(errno, sbuf, sizeof(sbuf)),
knob, (u64)attr.sample_freq);
goto out_delete_evlist;
}
err = evlist__mmap(evlist, 128);
if (err < 0) {
pr_debug("failed to mmap event: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
evlist__enable(evlist);
/* collect samples */
for (i = 0; i < NR_LOOPS; i++)
tmp++;
evlist__disable(evlist);
md = &evlist->mmap[0];
if (perf_mmap__read_init(&md->core) < 0)
goto out_init;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
struct perf_sample sample;
if (event->header.type != PERF_RECORD_SAMPLE)
goto next_event;
err = evlist__parse_sample(evlist, event, &sample);
if (err < 0) {
pr_debug("Error during parse sample\n");
goto out_delete_evlist;
}
total_periods += sample.period;
nr_samples++;
next_event:
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(&md->core);
out_init:
if ((u64) nr_samples == total_periods) {
pr_debug("All (%d) samples have period value of 1!\n",
nr_samples);
err = -1;
}
out_delete_evlist:
perf_cpu_map__put(cpus);
perf_thread_map__put(threads);
evlist__delete(evlist);
return err;
}
static int test__sw_clock_freq(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret;
ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK);
if (!ret)
ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK);
return ret;
}
DEFINE_SUITE("Software clock events period values", sw_clock_freq);
| linux-master | tools/perf/tests/sw-clock.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Originally done by Vince Weaver <[email protected]> for
* perf_event_tests (git://github.com/deater/perf_event_tests)
*/
/*
* Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
* 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
*/
#define __SANE_USERSPACE_TYPES__
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <time.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>
#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
#include "tests.h"
#include "debug.h"
#include "event.h"
#include "../perf-sys.h"
#include "cloexec.h"
static int overflows;
static noinline int test_function(void)
{
return time(NULL);
}
static void sig_handler(int signum __maybe_unused,
siginfo_t *oh __maybe_unused,
void *uc __maybe_unused)
{
overflows++;
}
static long long bp_count(int fd)
{
long long count;
int ret;
ret = read(fd, &count, sizeof(long long));
if (ret != sizeof(long long)) {
pr_debug("failed to read: %d\n", ret);
return TEST_FAIL;
}
return count;
}
#define EXECUTIONS 10000
#define THRESHOLD 100
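/*
* With sample_period == THRESHOLD, running test_function() EXECUTIONS times
* should overflow the counter EXECUTIONS / THRESHOLD = 100 times, each
* overflow delivering one SIGIO to the handler above.
*/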
static int test__bp_signal_overflow(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_event_attr pe;
struct sigaction sa;
long long count;
int fd, i, fails = 0;
if (!BP_SIGNAL_IS_SUPPORTED) {
pr_debug("Test not supported on this architecture");
return TEST_SKIP;
}
/* setup SIGIO signal handler */
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_sigaction = (void *) sig_handler;
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGIO, &sa, NULL) < 0) {
pr_debug("failed setting up signal handler\n");
return TEST_FAIL;
}
memset(&pe, 0, sizeof(struct perf_event_attr));
pe.type = PERF_TYPE_BREAKPOINT;
pe.size = sizeof(struct perf_event_attr);
pe.config = 0;
pe.bp_type = HW_BREAKPOINT_X;
pe.bp_addr = (unsigned long) test_function;
pe.bp_len = sizeof(long);
pe.sample_period = THRESHOLD;
pe.sample_type = PERF_SAMPLE_IP;
pe.wakeup_events = 1;
pe.disabled = 1;
pe.exclude_kernel = 1;
pe.exclude_hv = 1;
fd = sys_perf_event_open(&pe, 0, -1, -1,
perf_event_open_cloexec_flag());
if (fd < 0) {
pr_debug("failed opening event %llx\n", pe.config);
return TEST_FAIL;
}
fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
fcntl(fd, F_SETSIG, SIGIO);
fcntl(fd, F_SETOWN, getpid());
ioctl(fd, PERF_EVENT_IOC_RESET, 0);
ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
for (i = 0; i < EXECUTIONS; i++)
test_function();
ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
count = bp_count(fd);
close(fd);
pr_debug("count %lld, overflow %d\n",
count, overflows);
if (count != EXECUTIONS) {
pr_debug("\tWrong number of executions %lld != %d\n",
count, EXECUTIONS);
fails++;
}
if (overflows != EXECUTIONS / THRESHOLD) {
pr_debug("\tWrong number of overflows %d != %d\n",
overflows, EXECUTIONS / THRESHOLD);
fails++;
}
return fails ? TEST_FAIL : TEST_OK;
}
DEFINE_SUITE("Breakpoint overflow sampling", bp_signal_overflow);
| linux-master | tools/perf/tests/bp_signal_overflow.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The struct perf_event_attr test support.
*
* This test is embedded directly into perf and is governed
* by the PERF_TEST_ATTR environment variable and a hook inside
* the sys_perf_event_open function.
*
* The general idea is to store 'struct perf_event_attr' details for
* each event created within a single perf command. Each event's details
* are stored in a separate text file. Once the perf command has finished,
* these files can be checked for the values we expect for that command.
*
* Besides 'struct perf_event_attr' values we also store 'fd' and
* 'group_fd' values to allow checking for groups created.
*
* All of this is triggered by setting the PERF_TEST_ATTR environment variable.
* It must contain the name of an existing directory with access and write
* permissions. All the event text files are stored there.
*/
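/*
* Illustrative run (paths and numbers below are hypothetical, only the file
* format matches what store_event() writes):
*
*   $ mkdir /tmp/attr-dump
*   $ PERF_TEST_ATTR=/tmp/attr-dump perf record -o /dev/null true
*   $ cat /tmp/attr-dump/event-0-0-5      # name is event-<type>-<config>-<fd>
*   [event-0-0-5]
*   fd=5
*   group_fd=-1
*   ...
*   type=0
*   config=0
*   ...
*
* The attr.py harness run by test__attr() below compares such files against
* the expectations shipped in tests/attr/.
*/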
#include <debug.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stdio.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <subcmd/exec-cmd.h>
#include "event.h"
#include "util.h"
#include "tests.h"
#include "pmus.h"
#define ENV "PERF_TEST_ATTR"
static char *dir;
static bool ready;
void test_attr__init(void)
{
dir = getenv(ENV);
test_attr__enabled = (dir != NULL);
}
#define BUFSIZE 1024
#define __WRITE_ASS(str, fmt, data) \
do { \
char buf[BUFSIZE]; \
size_t size; \
\
size = snprintf(buf, BUFSIZE, #str "=%"fmt "\n", data); \
if (1 != fwrite(buf, size, 1, file)) { \
perror("test attr - failed to write event file"); \
fclose(file); \
return -1; \
} \
\
} while (0)
#define WRITE_ASS(field, fmt) __WRITE_ASS(field, fmt, attr->field)
static int store_event(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
int fd, int group_fd, unsigned long flags)
{
FILE *file;
char path[PATH_MAX];
if (!ready)
return 0;
snprintf(path, PATH_MAX, "%s/event-%d-%llu-%d", dir,
attr->type, attr->config, fd);
file = fopen(path, "w+");
if (!file) {
perror("test attr - failed to open event file");
return -1;
}
if (fprintf(file, "[event-%d-%llu-%d]\n",
attr->type, attr->config, fd) < 0) {
perror("test attr - failed to write event file");
fclose(file);
return -1;
}
/* syscall arguments */
__WRITE_ASS(fd, "d", fd);
__WRITE_ASS(group_fd, "d", group_fd);
__WRITE_ASS(cpu, "d", cpu.cpu);
__WRITE_ASS(pid, "d", pid);
__WRITE_ASS(flags, "lu", flags);
/* struct perf_event_attr */
WRITE_ASS(type, PRIu32);
WRITE_ASS(size, PRIu32);
WRITE_ASS(config, "llu");
WRITE_ASS(sample_period, "llu");
WRITE_ASS(sample_type, "llu");
WRITE_ASS(read_format, "llu");
WRITE_ASS(disabled, "d");
WRITE_ASS(inherit, "d");
WRITE_ASS(pinned, "d");
WRITE_ASS(exclusive, "d");
WRITE_ASS(exclude_user, "d");
WRITE_ASS(exclude_kernel, "d");
WRITE_ASS(exclude_hv, "d");
WRITE_ASS(exclude_idle, "d");
WRITE_ASS(mmap, "d");
WRITE_ASS(comm, "d");
WRITE_ASS(freq, "d");
WRITE_ASS(inherit_stat, "d");
WRITE_ASS(enable_on_exec, "d");
WRITE_ASS(task, "d");
WRITE_ASS(watermark, "d");
WRITE_ASS(precise_ip, "d");
WRITE_ASS(mmap_data, "d");
WRITE_ASS(sample_id_all, "d");
WRITE_ASS(exclude_host, "d");
WRITE_ASS(exclude_guest, "d");
WRITE_ASS(exclude_callchain_kernel, "d");
WRITE_ASS(exclude_callchain_user, "d");
WRITE_ASS(mmap2, "d");
WRITE_ASS(comm_exec, "d");
WRITE_ASS(context_switch, "d");
WRITE_ASS(write_backward, "d");
WRITE_ASS(namespaces, "d");
WRITE_ASS(use_clockid, "d");
WRITE_ASS(wakeup_events, PRIu32);
WRITE_ASS(bp_type, PRIu32);
WRITE_ASS(config1, "llu");
WRITE_ASS(config2, "llu");
WRITE_ASS(branch_sample_type, "llu");
WRITE_ASS(sample_regs_user, "llu");
WRITE_ASS(sample_stack_user, PRIu32);
fclose(file);
return 0;
}
void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
int fd, int group_fd, unsigned long flags)
{
int errno_saved = errno;
if ((fd != -1) && store_event(attr, pid, cpu, fd, group_fd, flags)) {
pr_err("test attr FAILED");
exit(128);
}
errno = errno_saved;
}
void test_attr__ready(void)
{
if (unlikely(test_attr__enabled) && !ready)
ready = true;
}
static int run_dir(const char *d, const char *perf)
{
char v[] = "-vvvvv";
int vcnt = min(verbose, (int) sizeof(v) - 1);
char cmd[3*PATH_MAX];
if (verbose > 0)
vcnt++;
scnprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
d, d, perf, vcnt, v);
return system(cmd) ? TEST_FAIL : TEST_OK;
}
static int test__attr(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct stat st;
char path_perf[PATH_MAX];
char path_dir[PATH_MAX];
char *exec_path;
if (perf_pmus__num_core_pmus() > 1) {
/*
* TODO: Attribute tests hard code the PMU type. If there are >1
* core PMU then each PMU will have a different type which
* requires additional support.
*/
pr_debug("Skip test on hybrid systems");
return TEST_SKIP;
}
/* First try development tree tests. */
if (!lstat("./tests", &st))
return run_dir("./tests", "./perf");
exec_path = get_argv_exec_path();
if (exec_path == NULL)
return -1;
/* Then installed path. */
snprintf(path_dir, PATH_MAX, "%s/tests", exec_path);
snprintf(path_perf, PATH_MAX, "%s/perf", BINDIR);
free(exec_path);
if (!lstat(path_dir, &st) &&
!lstat(path_perf, &st))
return run_dir(path_dir, path_perf);
return TEST_SKIP;
}
DEFINE_SUITE("Setup struct perf_event_attr", attr);
| linux-master | tools/perf/tests/attr.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
#include <linux/kernel.h>
#include "tests.h"
#include "debug.h"
#include "event.h"
#include "cloexec.h"
#include "../perf-sys.h"
#define WP_TEST_ASSERT_VAL(fd, text, val) \
do { \
long long count; \
wp_read(fd, &count, sizeof(long long)); \
TEST_ASSERT_VAL(text, count == val); \
} while (0)
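/*
* Targets for the data watchpoints: data1 is watched as a whole word by the
* RO/WO/RW tests, data2[] provides the 2-byte range used by test__wp_modify().
*/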
volatile u64 data1;
volatile u8 data2[3];
#ifndef __s390x__
static int wp_read(int fd, long long *count, int size)
{
int ret = read(fd, count, size);
if (ret != size) {
pr_debug("failed to read: %d\n", ret);
return -1;
}
return 0;
}
static void get__perf_event_attr(struct perf_event_attr *attr, int wp_type,
void *wp_addr, unsigned long wp_len)
{
memset(attr, 0, sizeof(struct perf_event_attr));
attr->type = PERF_TYPE_BREAKPOINT;
attr->size = sizeof(struct perf_event_attr);
attr->config = 0;
attr->bp_type = wp_type;
attr->bp_addr = (unsigned long)wp_addr;
attr->bp_len = wp_len;
attr->sample_period = 1;
attr->sample_type = PERF_SAMPLE_IP;
attr->exclude_kernel = 1;
attr->exclude_hv = 1;
}
static int __event(int wp_type, void *wp_addr, unsigned long wp_len)
{
int fd;
struct perf_event_attr attr;
get__perf_event_attr(&attr, wp_type, wp_addr, wp_len);
fd = sys_perf_event_open(&attr, 0, -1, -1,
perf_event_open_cloexec_flag());
if (fd < 0) {
fd = -errno;
pr_debug("failed opening event %x\n", attr.bp_type);
}
return fd;
}
#endif
static int test__wp_ro(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
#if defined(__s390x__) || defined(__x86_64__) || defined(__i386__)
return TEST_SKIP;
#else
int fd;
unsigned long tmp, tmp1 = rand();
fd = __event(HW_BREAKPOINT_R, (void *)&data1, sizeof(data1));
if (fd < 0)
return fd == -ENODEV ? TEST_SKIP : -1;
tmp = data1;
WP_TEST_ASSERT_VAL(fd, "RO watchpoint", 1);
data1 = tmp1 + tmp;
WP_TEST_ASSERT_VAL(fd, "RO watchpoint", 1);
close(fd);
return 0;
#endif
}
static int test__wp_wo(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
#if defined(__s390x__)
return TEST_SKIP;
#else
int fd;
unsigned long tmp, tmp1 = rand();
fd = __event(HW_BREAKPOINT_W, (void *)&data1, sizeof(data1));
if (fd < 0)
return fd == -ENODEV ? TEST_SKIP : -1;
tmp = data1;
WP_TEST_ASSERT_VAL(fd, "WO watchpoint", 0);
data1 = tmp1 + tmp;
WP_TEST_ASSERT_VAL(fd, "WO watchpoint", 1);
close(fd);
return 0;
#endif
}
static int test__wp_rw(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
#if defined(__s390x__)
return TEST_SKIP;
#else
int fd;
unsigned long tmp, tmp1 = rand();
fd = __event(HW_BREAKPOINT_R | HW_BREAKPOINT_W, (void *)&data1,
sizeof(data1));
if (fd < 0)
return fd == -ENODEV ? TEST_SKIP : -1;
tmp = data1;
WP_TEST_ASSERT_VAL(fd, "RW watchpoint", 1);
data1 = tmp1 + tmp;
WP_TEST_ASSERT_VAL(fd, "RW watchpoint", 2);
close(fd);
return 0;
#endif
}
static int test__wp_modify(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
#if defined(__s390x__)
return TEST_SKIP;
#else
int fd, ret;
unsigned long tmp = rand();
struct perf_event_attr new_attr;
fd = __event(HW_BREAKPOINT_W, (void *)&data1, sizeof(data1));
if (fd < 0)
return fd == -ENODEV ? TEST_SKIP : -1;
data1 = tmp;
WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 1);
/* Modify watchpoint with disabled = 1 */
get__perf_event_attr(&new_attr, HW_BREAKPOINT_W, (void *)&data2[0],
sizeof(u8) * 2);
new_attr.disabled = 1;
ret = ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr);
if (ret < 0) {
if (errno == ENOTTY) {
test->test_cases[subtest].skip_reason = "missing kernel support";
ret = TEST_SKIP;
}
pr_debug("ioctl(PERF_EVENT_IOC_MODIFY_ATTRIBUTES) failed\n");
close(fd);
return ret;
}
data2[1] = tmp; /* Not Counted */
WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 1);
/* Enable the event */
ret = ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
if (ret < 0) {
pr_debug("Failed to enable event\n");
close(fd);
return ret;
}
data2[1] = tmp; /* Counted */
WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 2);
data2[2] = tmp; /* Not Counted */
WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 2);
close(fd);
return 0;
#endif
}
static struct test_case wp_tests[] = {
TEST_CASE_REASON("Read Only Watchpoint", wp_ro, "missing hardware support"),
TEST_CASE_REASON("Write Only Watchpoint", wp_wo, "missing hardware support"),
TEST_CASE_REASON("Read / Write Watchpoint", wp_rw, "missing hardware support"),
TEST_CASE_REASON("Modify Watchpoint", wp_modify, "missing hardware support"),
{ .name = NULL, }
};
struct test_suite suite__wp = {
.desc = "Watchpoint",
.test_cases = wp_tests,
};
| linux-master | tools/perf/tests/wp.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "tests.h"
#include <api/io.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#define TEMPL "/tmp/perf-test-XXXXXX"
#define EXPECT_EQUAL(val, expected) \
do { \
if (val != expected) { \
pr_debug("%s:%d: %d != %d\n", \
__FILE__, __LINE__, val, expected); \
ret = -1; \
} \
} while (0)
#define EXPECT_EQUAL64(val, expected) \
do { \
if (val != expected) { \
pr_debug("%s:%d: %lld != %lld\n", \
__FILE__, __LINE__, val, expected); \
ret = -1; \
} \
} while (0)
static int make_test_file(char path[PATH_MAX], const char *contents)
{
ssize_t contents_len = strlen(contents);
int fd;
strcpy(path, TEMPL);
fd = mkstemp(path);
if (fd < 0) {
pr_debug("mkstemp failed");
return -1;
}
if (write(fd, contents, contents_len) < contents_len) {
pr_debug("short write");
close(fd);
unlink(path);
return -1;
}
close(fd);
return 0;
}
static int setup_test(char path[PATH_MAX], const char *contents,
size_t buf_size, struct io *io)
{
if (make_test_file(path, contents))
return -1;
io->fd = open(path, O_RDONLY);
if (io->fd < 0) {
pr_debug("Failed to open '%s'\n", path);
unlink(path);
return -1;
}
io->buf = malloc(buf_size);
if (io->buf == NULL) {
pr_debug("Failed to allocate memory");
close(io->fd);
unlink(path);
return -1;
}
io__init(io, io->fd, io->buf, buf_size);
return 0;
}
static void cleanup_test(char path[PATH_MAX], struct io *io)
{
zfree(&io->buf);
close(io->fd);
unlink(path);
}
static int do_test_get_char(const char *test_string, size_t buf_size)
{
char path[PATH_MAX];
struct io io;
int ch, ret = 0;
size_t i;
if (setup_test(path, test_string, buf_size, &io))
return -1;
for (i = 0; i < strlen(test_string); i++) {
ch = io__get_char(&io);
EXPECT_EQUAL(ch, test_string[i]);
EXPECT_EQUAL(io.eof, false);
}
ch = io__get_char(&io);
EXPECT_EQUAL(ch, -1);
EXPECT_EQUAL(io.eof, true);
cleanup_test(path, &io);
return ret;
}
static int test_get_char(void)
{
int i, ret = 0;
size_t j;
static const char *const test_strings[] = {
"12345678abcdef90",
"a\nb\nc\nd\n",
"\a\b\t\v\f\r",
};
for (i = 0; i <= 10; i++) {
for (j = 0; j < ARRAY_SIZE(test_strings); j++) {
if (do_test_get_char(test_strings[j], 1 << i))
ret = -1;
}
}
return ret;
}
static int do_test_get_hex(const char *test_string,
__u64 val1, int ch1,
__u64 val2, int ch2,
__u64 val3, int ch3,
bool end_eof)
{
char path[PATH_MAX];
struct io io;
int ch, ret = 0;
__u64 hex;
if (setup_test(path, test_string, 4, &io))
return -1;
ch = io__get_hex(&io, &hex);
EXPECT_EQUAL64(hex, val1);
EXPECT_EQUAL(ch, ch1);
ch = io__get_hex(&io, &hex);
EXPECT_EQUAL64(hex, val2);
EXPECT_EQUAL(ch, ch2);
ch = io__get_hex(&io, &hex);
EXPECT_EQUAL64(hex, val3);
EXPECT_EQUAL(ch, ch3);
EXPECT_EQUAL(io.eof, end_eof);
cleanup_test(path, &io);
return ret;
}
static int test_get_hex(void)
{
int ret = 0;
if (do_test_get_hex("12345678abcdef90",
0x12345678abcdef90, -1,
0, -1,
0, -1,
true))
ret = -1;
if (do_test_get_hex("1\n2\n3\n",
1, '\n',
2, '\n',
3, '\n',
false))
ret = -1;
if (do_test_get_hex("12345678ABCDEF90;a;b",
0x12345678abcdef90, ';',
0xa, ';',
0xb, -1,
true))
ret = -1;
if (do_test_get_hex("0x1x2x",
0, 'x',
1, 'x',
2, 'x',
false))
ret = -1;
if (do_test_get_hex("x1x",
0, -2,
1, 'x',
0, -1,
true))
ret = -1;
if (do_test_get_hex("10000000000000000000000000000abcdefgh99i",
0xabcdef, 'g',
0, -2,
0x99, 'i',
false))
ret = -1;
return ret;
}
static int do_test_get_dec(const char *test_string,
__u64 val1, int ch1,
__u64 val2, int ch2,
__u64 val3, int ch3,
bool end_eof)
{
char path[PATH_MAX];
struct io io;
int ch, ret = 0;
__u64 dec;
if (setup_test(path, test_string, 4, &io))
return -1;
ch = io__get_dec(&io, &dec);
EXPECT_EQUAL64(dec, val1);
EXPECT_EQUAL(ch, ch1);
ch = io__get_dec(&io, &dec);
EXPECT_EQUAL64(dec, val2);
EXPECT_EQUAL(ch, ch2);
ch = io__get_dec(&io, &dec);
EXPECT_EQUAL64(dec, val3);
EXPECT_EQUAL(ch, ch3);
EXPECT_EQUAL(io.eof, end_eof);
cleanup_test(path, &io);
return ret;
}
static int test_get_dec(void)
{
int ret = 0;
if (do_test_get_dec("12345678abcdef90",
12345678, 'a',
0, -2,
0, -2,
false))
ret = -1;
if (do_test_get_dec("1\n2\n3\n",
1, '\n',
2, '\n',
3, '\n',
false))
ret = -1;
if (do_test_get_dec("12345678;1;2",
12345678, ';',
1, ';',
2, -1,
true))
ret = -1;
if (do_test_get_dec("0x1x2x",
0, 'x',
1, 'x',
2, 'x',
false))
ret = -1;
if (do_test_get_dec("x1x",
0, -2,
1, 'x',
0, -1,
true))
ret = -1;
if (do_test_get_dec("10000000000000000000000000000000000000000000000000000000000123456789ab99c",
123456789, 'a',
0, -2,
99, 'c',
false))
ret = -1;
return ret;
}
static int test_get_line(void)
{
char path[PATH_MAX];
struct io io;
char test_string[1024];
char *line = NULL;
size_t i, line_len = 0;
size_t buf_size = 128;
int ret = 0;
for (i = 0; i < 512; i++)
test_string[i] = 'a';
test_string[512] = '\n';
for (i = 513; i < 1023; i++)
test_string[i] = 'b';
test_string[1023] = '\0';
if (setup_test(path, test_string, buf_size, &io))
return -1;
EXPECT_EQUAL((int)io__getline(&io, &line, &line_len), 513);
EXPECT_EQUAL((int)strlen(line), 513);
for (i = 0; i < 512; i++)
EXPECT_EQUAL(line[i], 'a');
EXPECT_EQUAL(line[512], '\n');
EXPECT_EQUAL((int)io__getline(&io, &line, &line_len), 510);
for (i = 0; i < 510; i++)
EXPECT_EQUAL(line[i], 'b');
free(line);
cleanup_test(path, &io);
return ret;
}
static int test__api_io(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
int ret = 0;
if (test_get_char())
ret = TEST_FAIL;
if (test_get_hex())
ret = TEST_FAIL;
if (test_get_dec())
ret = TEST_FAIL;
if (test_get_line())
ret = TEST_FAIL;
return ret;
}
DEFINE_SUITE("Test api io", api_io);
| linux-master | tools/perf/tests/api-io.c |
// SPDX-License-Identifier: GPL-2.0
#include "parse-events.h"
#include "evsel.h"
#include "evlist.h"
#include <api/fs/fs.h>
#include "tests.h"
#include "debug.h"
#include "pmu.h"
#include "pmus.h"
#include <dirent.h>
#include <errno.h>
#include "fncache.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>
#include <linux/hw_breakpoint.h>
#include <api/fs/tracing_path.h>
#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
static int num_core_entries(void)
{
/*
* If the kernel supports extended type, expect events to be
* opened once for each core PMU type. Otherwise fall back to the legacy
* behavior of opening only one event even though there are multiple
* PMUs
*/
if (perf_pmus__supports_extended_type())
return perf_pmus__num_core_pmus();
return 1;
}
static bool test_config(const struct evsel *evsel, __u64 expected_config)
{
__u32 type = evsel->core.attr.type;
__u64 config = evsel->core.attr.config;
if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) {
/*
* HARDWARE and HW_CACHE events encode the PMU's extended type
* in the top 32-bits. Mask in order to ignore.
*/
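/*
* e.g. a hypothetical core PMU with type 8 encodes cycles as
* (8ULL << 32) | PERF_COUNT_HW_CPU_CYCLES; masking with PERF_HW_EVENT_MASK
* (the low 32 bits) recovers the generic event id.
*/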
config &= PERF_HW_EVENT_MASK;
}
return config == expected_config;
}
static bool test_perf_config(const struct perf_evsel *evsel, __u64 expected_config)
{
return (evsel->attr.config & PERF_HW_EVENT_MASK) == expected_config;
}
#ifdef HAVE_LIBTRACEEVENT
#if defined(__s390x__)
/* Return true if kvm module is available and loaded. Test this
* and return success when trace point kvm_s390_create_vm
* exists. Otherwise this test always fails.
*/
static bool kvm_s390_create_vm_valid(void)
{
char *eventfile;
bool rc = false;
eventfile = get_events_file("kvm-s390");
if (eventfile) {
DIR *mydir = opendir(eventfile);
if (mydir) {
rc = true;
closedir(mydir);
}
put_events_file(eventfile);
}
return rc;
}
#endif
static int test__checkevent_tracepoint(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong number of groups", 0 == evlist__nr_groups(evlist));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong sample_type",
PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);
return TEST_OK;
}
static int test__checkevent_tracepoint_multi(struct evlist *evlist)
{
struct evsel *evsel;
TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries > 1);
TEST_ASSERT_VAL("wrong number of groups", 0 == evlist__nr_groups(evlist));
evlist__for_each_entry(evlist, evsel) {
TEST_ASSERT_VAL("wrong type",
PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong sample_type",
PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period",
1 == evsel->core.attr.sample_period);
}
return TEST_OK;
}
#endif /* HAVE_LIBTRACEEVENT */
static int test__checkevent_raw(struct evlist *evlist)
{
struct perf_evsel *evsel;
bool raw_type_match = false;
TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
perf_evlist__for_each_evsel(&evlist->core, evsel) {
struct perf_pmu *pmu __maybe_unused = NULL;
bool type_matched = false;
TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, 0x1a));
TEST_ASSERT_VAL("event not parsed as raw type",
evsel->attr.type == PERF_TYPE_RAW);
#if defined(__aarch64__)
/*
* Arm doesn't have a real raw type PMU in sysfs, so raw events
* would never match any PMU. However, RAW events on Arm will
* always successfully open on the first available core PMU
* so no need to test for a matching type here.
*/
type_matched = raw_type_match = true;
#else
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
if (pmu->type == evsel->attr.type) {
TEST_ASSERT_VAL("PMU type expected once", !type_matched);
type_matched = true;
if (pmu->type == PERF_TYPE_RAW)
raw_type_match = true;
}
}
#endif
TEST_ASSERT_VAL("No PMU found for type", type_matched);
}
TEST_ASSERT_VAL("Raw PMU not matched", raw_type_match);
return TEST_OK;
}
static int test__checkevent_numeric(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));
return TEST_OK;
}
static int test__checkevent_symbolic_name(struct evlist *evlist)
{
struct perf_evsel *evsel;
TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
perf_evlist__for_each_evsel(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
TEST_ASSERT_VAL("wrong config",
test_perf_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
}
return TEST_OK;
}
static int test__checkevent_symbolic_name_config(struct evlist *evlist)
{
struct perf_evsel *evsel;
TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
perf_evlist__for_each_evsel(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
/*
* The period value gets configured within evlist__config,
* while this test executes only parse events method.
*/
TEST_ASSERT_VAL("wrong period", 0 == evsel->attr.sample_period);
TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1);
TEST_ASSERT_VAL("wrong config2", 1 == evsel->attr.config2);
}
return TEST_OK;
}
static int test__checkevent_symbolic_alias(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_SW_PAGE_FAULTS));
return TEST_OK;
}
static int test__checkevent_genhw(struct evlist *evlist)
{
struct perf_evsel *evsel;
TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
perf_evlist__for_each_entry(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type);
TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, 1 << 16));
}
return TEST_OK;
}
static int test__checkevent_breakpoint(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
evsel->core.attr.bp_len);
return TEST_OK;
}
static int test__checkevent_breakpoint_x(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
TEST_ASSERT_VAL("wrong bp_type",
HW_BREAKPOINT_X == evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->core.attr.bp_len);
return TEST_OK;
}
static int test__checkevent_breakpoint_r(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type",
PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
TEST_ASSERT_VAL("wrong bp_type",
HW_BREAKPOINT_R == evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len",
HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
return TEST_OK;
}
static int test__checkevent_breakpoint_w(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type",
PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
TEST_ASSERT_VAL("wrong bp_type",
HW_BREAKPOINT_W == evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len",
HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
return TEST_OK;
}
static int test__checkevent_breakpoint_rw(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type",
PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
TEST_ASSERT_VAL("wrong bp_type",
(HW_BREAKPOINT_R|HW_BREAKPOINT_W) == evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len",
HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
return TEST_OK;
}
#ifdef HAVE_LIBTRACEEVENT
static int test__checkevent_tracepoint_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
return test__checkevent_tracepoint(evlist);
}
static int
test__checkevent_tracepoint_multi_modifier(struct evlist *evlist)
{
struct perf_evsel *evsel;
TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries > 1);
perf_evlist__for_each_entry(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
}
return test__checkevent_tracepoint_multi(evlist);
}
#endif /* HAVE_LIBTRACEEVENT */
static int test__checkevent_raw_modifier(struct evlist *evlist)
{
struct perf_evsel *evsel;
perf_evlist__for_each_entry(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
}
return test__checkevent_raw(evlist);
}
static int test__checkevent_numeric_modifier(struct evlist *evlist)
{
struct perf_evsel *evsel;
perf_evlist__for_each_entry(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
}
return test__checkevent_numeric(evlist);
}
static int test__checkevent_symbolic_name_modifier(struct evlist *evlist)
{
struct perf_evsel *evsel;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == num_core_entries());
perf_evlist__for_each_entry(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
}
return test__checkevent_symbolic_name(evlist);
}
static int test__checkevent_exclude_host_modifier(struct evlist *evlist)
{
struct perf_evsel *evsel;
perf_evlist__for_each_entry(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
}
return test__checkevent_symbolic_name(evlist);
}
static int test__checkevent_exclude_guest_modifier(struct evlist *evlist)
{
struct perf_evsel *evsel;
perf_evlist__for_each_entry(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
}
return test__checkevent_symbolic_name(evlist);
}
static int test__checkevent_symbolic_alias_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
return test__checkevent_symbolic_alias(evlist);
}
static int test__checkevent_genhw_modifier(struct evlist *evlist)
{
struct perf_evsel *evsel;
perf_evlist__for_each_entry(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
}
return test__checkevent_genhw(evlist);
}
static int test__checkevent_exclude_idle_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
return test__checkevent_symbolic_name(evlist);
}
static int test__checkevent_exclude_idle_modifier_1(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
return test__checkevent_symbolic_name(evlist);
}
static int test__checkevent_breakpoint_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "mem:0:u"));
return test__checkevent_breakpoint(evlist);
}
static int test__checkevent_breakpoint_x_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "mem:0:x:k"));
return test__checkevent_breakpoint_x(evlist);
}
static int test__checkevent_breakpoint_r_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "mem:0:r:hp"));
return test__checkevent_breakpoint_r(evlist);
}
static int test__checkevent_breakpoint_w_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "mem:0:w:up"));
return test__checkevent_breakpoint_w(evlist);
}
static int test__checkevent_breakpoint_rw_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "mem:0:rw:kp"));
return test__checkevent_breakpoint_rw(evlist);
}
static int test__checkevent_breakpoint_modifier_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "breakpoint"));
return test__checkevent_breakpoint(evlist);
}
static int test__checkevent_breakpoint_x_modifier_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "breakpoint"));
return test__checkevent_breakpoint_x(evlist);
}
static int test__checkevent_breakpoint_r_modifier_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "breakpoint"));
return test__checkevent_breakpoint_r(evlist);
}
static int test__checkevent_breakpoint_w_modifier_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "breakpoint"));
return test__checkevent_breakpoint_w(evlist);
}
static int test__checkevent_breakpoint_rw_modifier_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "breakpoint"));
return test__checkevent_breakpoint_rw(evlist);
}
static int test__checkevent_breakpoint_2_events(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong name", !strcmp(evsel__name(evsel), "breakpoint1"));
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong name", !strcmp(evsel__name(evsel), "breakpoint2"));
return TEST_OK;
}
static int test__checkevent_pmu(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 10));
TEST_ASSERT_VAL("wrong config1", 1 == evsel->core.attr.config1);
TEST_ASSERT_VAL("wrong config2", 3 == evsel->core.attr.config2);
TEST_ASSERT_VAL("wrong config3", 0 == evsel->core.attr.config3);
/*
* The period value gets configured within evlist__config(),
* while this test only exercises the parse-events method.
*/
TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
return TEST_OK;
}
#ifdef HAVE_LIBTRACEEVENT
static int test__checkevent_list(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 3 <= evlist->core.nr_entries);
/* r1 */
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT != evsel->core.attr.type);
while (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));
TEST_ASSERT_VAL("wrong config1", 0 == evsel->core.attr.config1);
TEST_ASSERT_VAL("wrong config2", 0 == evsel->core.attr.config2);
TEST_ASSERT_VAL("wrong config3", 0 == evsel->core.attr.config3);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
evsel = evsel__next(evsel);
}
/* syscalls:sys_enter_openat:k */
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong sample_type",
PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
/* 1:1:hp */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
return TEST_OK;
}
#endif
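/* Two events from one spec: 'name=krava' overrides the first evsel's name; the second falls back to its full 'cpu/config=2/u' spec. */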
static int test__checkevent_pmu_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
/* cpu/config=1,name=krava/u */
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));
TEST_ASSERT_VAL("wrong name", !strcmp(evsel__name(evsel), "krava"));
/* cpu/config=2/u */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 2));
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "cpu/config=2/u"));
return TEST_OK;
}
static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
/* cpu/config=1,call-graph=fp,time,period=100000/ */
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));
/*
* The period, time and callgraph values get configured within evlist__config(),
* while this test only exercises the parse-events method.
*/
TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->core.attr.sample_type));
/* cpu/config=2,call-graph=no,time=0,period=2000/ */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 2));
/*
* The period, time and callgraph values get configured within evlist__config(),
* while this test only exercises the parse-events method.
*/
TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->core.attr.sample_type));
return TEST_OK;
}
static int test__checkevent_pmu_events(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type ||
strcmp(evsel->pmu_name, "cpu"));
TEST_ASSERT_VAL("wrong exclude_user",
!evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel",
evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
return TEST_OK;
}
static int test__checkevent_pmu_events_mix(struct evlist *evlist)
{
struct evsel *evsel = NULL;
/*
* The wild card event will be opened at least once, but it may be
* opened on each core PMU.
*/
TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries >= 2);
for (int i = 0; i < evlist->core.nr_entries - 1; i++) {
evsel = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
/* pmu-event:u */
TEST_ASSERT_VAL("wrong exclude_user",
!evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel",
evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
}
/* cpu/pmu-event/u */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", evsel__find_pmu(evsel)->is_core);
TEST_ASSERT_VAL("wrong exclude_user",
!evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel",
evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.pinned);
return TEST_OK;
}
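/* Walk the parsed term list in order and verify each term's type, numeric/string value and config name. */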
static int test__checkterms_simple(struct list_head *terms)
{
struct parse_events_term *term;
/* config=10 */
term = list_entry(terms->next, struct parse_events_term, list);
TEST_ASSERT_VAL("wrong type term",
term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG);
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 10);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config"));
/* config1 */
term = list_entry(term->list.next, struct parse_events_term, list);
TEST_ASSERT_VAL("wrong type term",
term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1);
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 1);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config1"));
/* config2=3 */
term = list_entry(term->list.next, struct parse_events_term, list);
TEST_ASSERT_VAL("wrong type term",
term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2);
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 3);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config2"));
/* config3=4 */
term = list_entry(term->list.next, struct parse_events_term, list);
TEST_ASSERT_VAL("wrong type term",
term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG3);
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 4);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config3"));
/* umask=1 */
term = list_entry(term->list.next, struct parse_events_term, list);
TEST_ASSERT_VAL("wrong type term",
term->type_term == PARSE_EVENTS__TERM_TYPE_USER);
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 1);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "umask"));
/*
* read
*
* perf_pmu__test_parse_init() injects a 'read' term into
* perf_pmu_events_list, so 'read' is evaluated as a read term
* and not as a raw event with the 'ead' hex value.
*/
term = list_entry(term->list.next, struct parse_events_term, list);
TEST_ASSERT_VAL("wrong type term",
term->type_term == PARSE_EVENTS__TERM_TYPE_RAW);
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
TEST_ASSERT_VAL("wrong val", !strcmp(term->val.str, "read"));
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "raw"));
/*
* r0xead
*
* To still be able to pass the 'ead' value with the 'r' syntax,
* we added support for parsing the 'r0xHEX' event form.
*/
term = list_entry(term->list.next, struct parse_events_term, list);
TEST_ASSERT_VAL("wrong type term",
term->type_term == PARSE_EVENTS__TERM_TYPE_RAW);
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
TEST_ASSERT_VAL("wrong val", !strcmp(term->val.str, "r0xead"));
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "raw"));
return TEST_OK;
}
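/* {instructions:k,cycles:upp}: one two-member group per core PMU, with the per-event modifiers preserved. */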
static int test__group1(struct evlist *evlist)
{
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (num_core_entries() * 2));
TEST_ASSERT_VAL("wrong number of groups",
evlist__nr_groups(evlist) == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
/* instructions:k */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* cycles:upp */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
/* use of precise requires exclude_guest */
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
}
return TEST_OK;
}
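/* {faults:k,cache-references}:u,cycles:k: the software faults event leads the only group (see the TODO below), cache-references may join it, and cycles stays ungrouped. */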
static int test__group2(struct evlist *evlist)
{
struct evsel *evsel, *leader = NULL;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (2 * num_core_entries() + 1));
/*
* TODO: Currently the software event won't be grouped with the hardware
* event except for 1 PMU.
*/
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist));
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == PERF_TYPE_SOFTWARE) {
/* faults + :ku modifier */
leader = evsel;
TEST_ASSERT_VAL("wrong config",
test_config(evsel, PERF_COUNT_SW_PAGE_FAULTS));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
continue;
}
if (evsel->core.attr.type == PERF_TYPE_HARDWARE &&
test_config(evsel, PERF_COUNT_HW_CACHE_REFERENCES)) {
/* cache-references + :u modifier */
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
if (evsel__has_leader(evsel, leader))
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
continue;
}
/* cycles:k */
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
}
return TEST_OK;
}
#ifdef HAVE_LIBTRACEEVENT
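/* Checks 'group1{syscalls:sys_enter_openat:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u': two named groups plus an ungrouped instructions:u event. */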
static int test__group3(struct evlist *evlist)
{
struct evsel *evsel, *group1_leader = NULL, *group2_leader = NULL;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus() + 2));
/*
* Currently the software event won't be grouped with the hardware event
* except for 1 PMU. This means there are always just 2 groups
* regardless of the number of core PMUs.
*/
TEST_ASSERT_VAL("wrong number of groups", 2 == evlist__nr_groups(evlist));
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
/* group1 syscalls:sys_enter_openat:H */
group1_leader = evsel;
TEST_ASSERT_VAL("wrong sample_type",
evsel->core.attr.sample_type == PERF_TP_SAMPLE_TYPE);
TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong group name", !strcmp(evsel->group_name, "group1"));
TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
continue;
}
if (evsel->core.attr.type == PERF_TYPE_HARDWARE &&
test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)) {
if (evsel->core.attr.exclude_user) {
/* group1 cycles:kppp */
TEST_ASSERT_VAL("wrong exclude_user",
evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel",
!evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
/* use of precise requires exclude_guest */
TEST_ASSERT_VAL("wrong exclude guest",
evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host",
!evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip",
evsel->core.attr.precise_ip == 3);
if (evsel__has_leader(evsel, group1_leader)) {
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong group_idx",
evsel__group_idx(evsel) == 1);
}
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
} else {
/* group2 cycles + G modifier */
group2_leader = evsel;
TEST_ASSERT_VAL("wrong exclude_kernel",
!evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv",
!evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest",
!evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host",
evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
if (evsel->core.nr_members == 2) {
TEST_ASSERT_VAL("wrong group_idx",
evsel__group_idx(evsel) == 0);
}
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
}
continue;
}
if (evsel->core.attr.type == 1) {
/* group2 1:3 + G modifier */
TEST_ASSERT_VAL("wrong config", test_config(evsel, 3));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
if (evsel__has_leader(evsel, group2_leader))
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
continue;
}
/* instructions:u */
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
}
return TEST_OK;
}
#endif
static int test__group4(struct evlist *evlist)
{
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (num_core_entries() * 2));
TEST_ASSERT_VAL("wrong number of groups",
num_core_entries() == evlist__nr_groups(evlist));
for (int i = 0; i < num_core_entries(); i++) {
/* cycles:u + p */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
/* use of precise requires exclude_guest */
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 1);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* instructions:kp + p */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
/* use of precise requires exclude_guest */
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
}
return TEST_OK;
}
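/* {cycles,instructions}:G,{cycles:G,instructions:G},cycles: two guest-only groups plus an ungrouped cycles event, replicated per core PMU. */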
static int test__group5(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (5 * num_core_entries()));
TEST_ASSERT_VAL("wrong number of groups",
evlist__nr_groups(evlist) == (2 * num_core_entries()));
for (int i = 0; i < num_core_entries(); i++) {
/* cycles + G */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* instructions + G */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
}
for (int i = 0; i < num_core_entries(); i++) {
/* cycles:G */
evsel = leader = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* instructions:G */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
}
for (int i = 0; i < num_core_entries(); i++) {
/* cycles */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
}
return TEST_OK;
}
static int test__group_gh1(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (2 * num_core_entries()));
TEST_ASSERT_VAL("wrong number of groups",
evlist__nr_groups(evlist) == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
/* cycles + :H group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
/* cache-misses:G + :H group modifier */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
}
return TEST_OK;
}
static int test__group_gh2(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (2 * num_core_entries()));
TEST_ASSERT_VAL("wrong number of groups",
evlist__nr_groups(evlist) == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
/* cycles + :G group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
/* cache-misses:H + :G group modifier */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
}
return TEST_OK;
}
static int test__group_gh3(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (2 * num_core_entries()));
TEST_ASSERT_VAL("wrong number of groups",
evlist__nr_groups(evlist) == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
/* cycles:G + :u group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
/* cache-misses:H + :u group modifier */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
}
return TEST_OK;
}
static int test__group_gh4(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (2 * num_core_entries()));
TEST_ASSERT_VAL("wrong number of groups",
evlist__nr_groups(evlist) == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
/* cycles:G + :uG group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
/* cache-misses:H + :uG group modifier */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
}
return TEST_OK;
}
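/* {cycles,cache-misses,branch-misses}:S: leader sampling; every group member must have sample_read set. */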
static int test__leader_sample1(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (3 * num_core_entries()));
for (int i = 0; i < num_core_entries(); i++) {
/* cycles - sampling group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
/* cache-misses - not sampling */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
/* branch-misses - not sampling */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
}
return TEST_OK;
}
static int test__leader_sample2(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (2 * num_core_entries()));
for (int i = 0; i < num_core_entries(); i++) {
/* instructions - sampling group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
/* branch-misses - not sampling */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
}
return TEST_OK;
}
static int test__checkevent_pinned_modifier(struct evlist *evlist)
{
struct evsel *evsel = NULL;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
evsel = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned);
}
return test__checkevent_symbolic_name(evlist);
}
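/* {cycles,cache-misses,branch-misses}:D: only the group leader is expected to end up pinned. */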
static int test__pinned_group(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (3 * num_core_entries()));
for (int i = 0; i < num_core_entries(); i++) {
/* cycles - group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
/* TODO: The group modifier is not copied to the split group leader. */
if (perf_pmus__num_core_pmus() == 1)
TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned);
/* cache-misses - cannot be pinned, but will go on with the leader */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
/* branch-misses - ditto */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
}
return TEST_OK;
}
static int test__checkevent_exclusive_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong exclusive", evsel->core.attr.exclusive);
return test__checkevent_symbolic_name(evlist);
}
static int test__exclusive_group(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == 3 * num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
/* cycles - group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
/* TODO: The group modifier is not copied to the split group leader. */
if (perf_pmus__num_core_pmus() == 1)
TEST_ASSERT_VAL("wrong exclusive", evsel->core.attr.exclusive);
/* cache-misses - cannot be exclusive, but will go on with the leader */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
/* branch-misses - ditto */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
}
return TEST_OK;
}
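/* mem:0/1: read/write breakpoint at address 0 with an explicit one-byte length. */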
static int test__checkevent_breakpoint_len(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 ==
evsel->core.attr.bp_len);
return TEST_OK;
}
static int test__checkevent_breakpoint_len_w(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
TEST_ASSERT_VAL("wrong bp_type", HW_BREAKPOINT_W ==
evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 ==
evsel->core.attr.bp_len);
return TEST_OK;
}
static int
test__checkevent_breakpoint_len_rw_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
return test__checkevent_breakpoint_rw(evlist);
}
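/* task-clock:P,cycles: the software task-clock event must come first; cycles follows once per core PMU. */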
static int test__checkevent_precise_max_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == 1 + num_core_entries());
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_SW_TASK_CLOCK));
return TEST_OK;
}
static int test__checkevent_config_symbol(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "insn"));
return TEST_OK;
}
static int test__checkevent_config_raw(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "rawpmu"));
return TEST_OK;
}
static int test__checkevent_config_num(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "numpmu"));
return TEST_OK;
}
static int test__checkevent_config_cache(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "cachepmu"));
return test__checkevent_genhw(evlist);
}
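/* Validity callbacks used by the test table below to skip events whose PMU or format is absent on this system. */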
static bool test__pmu_cpu_valid(void)
{
return !!perf_pmus__find("cpu");
}
static bool test__pmu_cpu_event_valid(void)
{
struct perf_pmu *pmu = perf_pmus__find("cpu");
if (!pmu)
return false;
return perf_pmu__has_format(pmu, "event");
}
static bool test__intel_pt_valid(void)
{
return !!perf_pmus__find("intel_pt");
}
static int test__intel_pt(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "intel_pt//u"));
return TEST_OK;
}
static int test__checkevent_complex_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong complex name parsing",
evsel__name_is(evsel,
"COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks"));
return TEST_OK;
}
static int test__checkevent_raw_pmu(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, 0x1a));
return TEST_OK;
}
static int test__sym_event_slash(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
return TEST_OK;
}
static int test__sym_event_dc(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
return TEST_OK;
}
static int test__term_equal_term(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "name") == 0);
return TEST_OK;
}
static int test__term_equal_legacy(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "l1d") == 0);
return TEST_OK;
}
#ifdef HAVE_LIBTRACEEVENT
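/* Count every sys:event tracepoint under the tracefs events directory, skipping the enable/header entries. */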
static int count_tracepoints(void)
{
struct dirent *events_ent;
DIR *events_dir;
int cnt = 0;
events_dir = tracing_events__opendir();
TEST_ASSERT_VAL("Can't open events dir", events_dir);
while ((events_ent = readdir(events_dir))) {
char *sys_path;
struct dirent *sys_ent;
DIR *sys_dir;
if (!strcmp(events_ent->d_name, ".")
|| !strcmp(events_ent->d_name, "..")
|| !strcmp(events_ent->d_name, "enable")
|| !strcmp(events_ent->d_name, "header_event")
|| !strcmp(events_ent->d_name, "header_page"))
continue;
sys_path = get_events_file(events_ent->d_name);
TEST_ASSERT_VAL("Can't get sys path", sys_path);
sys_dir = opendir(sys_path);
TEST_ASSERT_VAL("Can't open sys dir", sys_dir);
while ((sys_ent = readdir(sys_dir))) {
if (!strcmp(sys_ent->d_name, ".")
|| !strcmp(sys_ent->d_name, "..")
|| !strcmp(sys_ent->d_name, "enable")
|| !strcmp(sys_ent->d_name, "filter"))
continue;
cnt++;
}
closedir(sys_dir);
put_events_file(sys_path);
}
closedir(events_dir);
return cnt;
}
static int test__all_tracepoints(struct evlist *evlist)
{
TEST_ASSERT_VAL("wrong events count",
count_tracepoints() == evlist->core.nr_entries);
return test__checkevent_tracepoint_multi(evlist);
}
#endif /* HAVE_LIBTRACEEVENT */
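/* One table entry per event string: 'valid' (optional) gates whether the test can run on this system, 'check' verifies the parsed evlist. */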
struct evlist_test {
const char *name;
bool (*valid)(void);
int (*check)(struct evlist *evlist);
};
static const struct evlist_test test__events[] = {
#ifdef HAVE_LIBTRACEEVENT
{
.name = "syscalls:sys_enter_openat",
.check = test__checkevent_tracepoint,
/* 0 */
},
{
.name = "syscalls:*",
.check = test__checkevent_tracepoint_multi,
/* 1 */
},
#endif
{
.name = "r1a",
.check = test__checkevent_raw,
/* 2 */
},
{
.name = "1:1",
.check = test__checkevent_numeric,
/* 3 */
},
{
.name = "instructions",
.check = test__checkevent_symbolic_name,
/* 4 */
},
{
.name = "cycles/period=100000,config2/",
.check = test__checkevent_symbolic_name_config,
/* 5 */
},
{
.name = "faults",
.check = test__checkevent_symbolic_alias,
/* 6 */
},
{
.name = "L1-dcache-load-miss",
.check = test__checkevent_genhw,
/* 7 */
},
{
.name = "mem:0",
.check = test__checkevent_breakpoint,
/* 8 */
},
{
.name = "mem:0:x",
.check = test__checkevent_breakpoint_x,
/* 9 */
},
{
.name = "mem:0:r",
.check = test__checkevent_breakpoint_r,
/* 0 */
},
{
.name = "mem:0:w",
.check = test__checkevent_breakpoint_w,
/* 1 */
},
#ifdef HAVE_LIBTRACEEVENT
{
.name = "syscalls:sys_enter_openat:k",
.check = test__checkevent_tracepoint_modifier,
/* 2 */
},
{
.name = "syscalls:*:u",
.check = test__checkevent_tracepoint_multi_modifier,
/* 3 */
},
#endif
{
.name = "r1a:kp",
.check = test__checkevent_raw_modifier,
/* 4 */
},
{
.name = "1:1:hp",
.check = test__checkevent_numeric_modifier,
/* 5 */
},
{
.name = "instructions:h",
.check = test__checkevent_symbolic_name_modifier,
/* 6 */
},
{
.name = "faults:u",
.check = test__checkevent_symbolic_alias_modifier,
/* 7 */
},
{
.name = "L1-dcache-load-miss:kp",
.check = test__checkevent_genhw_modifier,
/* 8 */
},
{
.name = "mem:0:u",
.check = test__checkevent_breakpoint_modifier,
/* 9 */
},
{
.name = "mem:0:x:k",
.check = test__checkevent_breakpoint_x_modifier,
/* 0 */
},
{
.name = "mem:0:r:hp",
.check = test__checkevent_breakpoint_r_modifier,
/* 1 */
},
{
.name = "mem:0:w:up",
.check = test__checkevent_breakpoint_w_modifier,
/* 2 */
},
#ifdef HAVE_LIBTRACEEVENT
{
.name = "r1,syscalls:sys_enter_openat:k,1:1:hp",
.check = test__checkevent_list,
/* 3 */
},
#endif
{
.name = "instructions:G",
.check = test__checkevent_exclude_host_modifier,
/* 4 */
},
{
.name = "instructions:H",
.check = test__checkevent_exclude_guest_modifier,
/* 5 */
},
{
.name = "mem:0:rw",
.check = test__checkevent_breakpoint_rw,
/* 6 */
},
{
.name = "mem:0:rw:kp",
.check = test__checkevent_breakpoint_rw_modifier,
/* 7 */
},
{
.name = "{instructions:k,cycles:upp}",
.check = test__group1,
/* 8 */
},
{
.name = "{faults:k,cache-references}:u,cycles:k",
.check = test__group2,
/* 9 */
},
#ifdef HAVE_LIBTRACEEVENT
{
.name = "group1{syscalls:sys_enter_openat:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u",
.check = test__group3,
/* 0 */
},
#endif
{
.name = "{cycles:u,instructions:kp}:p",
.check = test__group4,
/* 1 */
},
{
.name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles",
.check = test__group5,
/* 2 */
},
#ifdef HAVE_LIBTRACEEVENT
{
.name = "*:*",
.check = test__all_tracepoints,
/* 3 */
},
#endif
{
.name = "{cycles,cache-misses:G}:H",
.check = test__group_gh1,
/* 4 */
},
{
.name = "{cycles,cache-misses:H}:G",
.check = test__group_gh2,
/* 5 */
},
{
.name = "{cycles:G,cache-misses:H}:u",
.check = test__group_gh3,
/* 6 */
},
{
.name = "{cycles:G,cache-misses:H}:uG",
.check = test__group_gh4,
/* 7 */
},
{
.name = "{cycles,cache-misses,branch-misses}:S",
.check = test__leader_sample1,
/* 8 */
},
{
.name = "{instructions,branch-misses}:Su",
.check = test__leader_sample2,
/* 9 */
},
{
.name = "instructions:uDp",
.check = test__checkevent_pinned_modifier,
/* 0 */
},
{
.name = "{cycles,cache-misses,branch-misses}:D",
.check = test__pinned_group,
/* 1 */
},
{
.name = "mem:0/1",
.check = test__checkevent_breakpoint_len,
/* 2 */
},
{
.name = "mem:0/2:w",
.check = test__checkevent_breakpoint_len_w,
/* 3 */
},
{
.name = "mem:0/4:rw:u",
.check = test__checkevent_breakpoint_len_rw_modifier,
/* 4 */
},
#if defined(__s390x__) && defined(HAVE_LIBTRACEEVENT)
{
.name = "kvm-s390:kvm_s390_create_vm",
.check = test__checkevent_tracepoint,
.valid = kvm_s390_create_vm_valid,
/* 0 */
},
#endif
{
.name = "instructions:I",
.check = test__checkevent_exclude_idle_modifier,
/* 5 */
},
{
.name = "instructions:kIG",
.check = test__checkevent_exclude_idle_modifier_1,
/* 6 */
},
{
.name = "task-clock:P,cycles",
.check = test__checkevent_precise_max_modifier,
/* 7 */
},
{
.name = "instructions/name=insn/",
.check = test__checkevent_config_symbol,
/* 8 */
},
{
.name = "r1234/name=rawpmu/",
.check = test__checkevent_config_raw,
/* 9 */
},
{
.name = "4:0x6530160/name=numpmu/",
.check = test__checkevent_config_num,
/* 0 */
},
{
.name = "L1-dcache-misses/name=cachepmu/",
.check = test__checkevent_config_cache,
/* 1 */
},
{
.name = "intel_pt//u",
.valid = test__intel_pt_valid,
.check = test__intel_pt,
/* 2 */
},
{
.name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
.check = test__checkevent_complex_name,
/* 3 */
},
{
.name = "cycles//u",
.check = test__sym_event_slash,
/* 4 */
},
{
.name = "cycles:k",
.check = test__sym_event_dc,
/* 5 */
},
{
.name = "instructions:uep",
.check = test__checkevent_exclusive_modifier,
/* 6 */
},
{
.name = "{cycles,cache-misses,branch-misses}:e",
.check = test__exclusive_group,
/* 7 */
},
{
.name = "cycles/name=name/",
.check = test__term_equal_term,
/* 8 */
},
{
.name = "cycles/name=l1d/",
.check = test__term_equal_legacy,
/* 9 */
},
{
.name = "mem:0/name=breakpoint/",
.check = test__checkevent_breakpoint,
/* 0 */
},
{
.name = "mem:0:x/name=breakpoint/",
.check = test__checkevent_breakpoint_x,
/* 1 */
},
{
.name = "mem:0:r/name=breakpoint/",
.check = test__checkevent_breakpoint_r,
/* 2 */
},
{
.name = "mem:0:w/name=breakpoint/",
.check = test__checkevent_breakpoint_w,
/* 3 */
},
{
.name = "mem:0/name=breakpoint/u",
.check = test__checkevent_breakpoint_modifier_name,
/* 4 */
},
{
.name = "mem:0:x/name=breakpoint/k",
.check = test__checkevent_breakpoint_x_modifier_name,
/* 5 */
},
{
.name = "mem:0:r/name=breakpoint/hp",
.check = test__checkevent_breakpoint_r_modifier_name,
/* 6 */
},
{
.name = "mem:0:w/name=breakpoint/up",
.check = test__checkevent_breakpoint_w_modifier_name,
/* 7 */
},
{
.name = "mem:0:rw/name=breakpoint/",
.check = test__checkevent_breakpoint_rw,
/* 8 */
},
{
.name = "mem:0:rw/name=breakpoint/kp",
.check = test__checkevent_breakpoint_rw_modifier_name,
/* 9 */
},
{
.name = "mem:0/1/name=breakpoint/",
.check = test__checkevent_breakpoint_len,
/* 0 */
},
{
.name = "mem:0/2:w/name=breakpoint/",
.check = test__checkevent_breakpoint_len_w,
/* 1 */
},
{
.name = "mem:0/4:rw/name=breakpoint/u",
.check = test__checkevent_breakpoint_len_rw_modifier,
/* 2 */
},
{
.name = "mem:0/1/name=breakpoint1/,mem:0/4:rw/name=breakpoint2/",
.check = test__checkevent_breakpoint_2_events,
/* 3 */
},
};
static const struct evlist_test test__events_pmu[] = {
{
.name = "cpu/config=10,config1=1,config2=3,period=1000/u",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_pmu,
/* 0 */
},
{
.name = "cpu/config=1,name=krava/u,cpu/config=2/u",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_pmu_name,
/* 1 */
},
{
.name = "cpu/config=1,call-graph=fp,time,period=100000/,cpu/config=2,call-graph=no,time=0,period=2000/",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_pmu_partial_time_callgraph,
/* 2 */
},
{
.name = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp",
.valid = test__pmu_cpu_event_valid,
.check = test__checkevent_complex_name,
/* 3 */
},
{
.name = "software/r1a/",
.check = test__checkevent_raw_pmu,
/* 4 */
},
{
.name = "software/r0x1a/",
.check = test__checkevent_raw_pmu,
/* 5 */
},
{
.name = "cpu/L1-dcache-load-miss/",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_genhw,
/* 6 */
},
{
.name = "cpu/L1-dcache-load-miss/kp",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_genhw_modifier,
/* 7 */
},
{
.name = "cpu/L1-dcache-misses,name=cachepmu/",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_config_cache,
/* 8 */
},
{
.name = "cpu/instructions/",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_symbolic_name,
/* 9 */
},
{
.name = "cpu/cycles,period=100000,config2/",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_symbolic_name_config,
/* 0 */
},
{
.name = "cpu/instructions/h",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_symbolic_name_modifier,
/* 1 */
},
{
.name = "cpu/instructions/G",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_exclude_host_modifier,
/* 2 */
},
{
.name = "cpu/instructions/H",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_exclude_guest_modifier,
/* 3 */
},
{
.name = "{cpu/instructions/k,cpu/cycles/upp}",
.valid = test__pmu_cpu_valid,
.check = test__group1,
/* 4 */
},
{
.name = "{cpu/cycles/u,cpu/instructions/kp}:p",
.valid = test__pmu_cpu_valid,
.check = test__group4,
/* 5 */
},
{
.name = "{cpu/cycles/,cpu/cache-misses/G}:H",
.valid = test__pmu_cpu_valid,
.check = test__group_gh1,
/* 6 */
},
{
.name = "{cpu/cycles/,cpu/cache-misses/H}:G",
.valid = test__pmu_cpu_valid,
.check = test__group_gh2,
/* 7 */
},
{
.name = "{cpu/cycles/G,cpu/cache-misses/H}:u",
.valid = test__pmu_cpu_valid,
.check = test__group_gh3,
/* 8 */
},
{
.name = "{cpu/cycles/G,cpu/cache-misses/H}:uG",
.valid = test__pmu_cpu_valid,
.check = test__group_gh4,
/* 9 */
},
{
.name = "{cpu/cycles/,cpu/cache-misses/,cpu/branch-misses/}:S",
.valid = test__pmu_cpu_valid,
.check = test__leader_sample1,
/* 0 */
},
{
.name = "{cpu/instructions/,cpu/branch-misses/}:Su",
.valid = test__pmu_cpu_valid,
.check = test__leader_sample2,
/* 1 */
},
{
.name = "cpu/instructions/uDp",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_pinned_modifier,
/* 2 */
},
{
.name = "{cpu/cycles/,cpu/cache-misses/,cpu/branch-misses/}:D",
.valid = test__pmu_cpu_valid,
.check = test__pinned_group,
/* 3 */
},
{
.name = "cpu/instructions/I",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_exclude_idle_modifier,
/* 4 */
},
{
.name = "cpu/instructions/kIG",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_exclude_idle_modifier_1,
/* 5 */
},
{
.name = "cpu/cycles/u",
.valid = test__pmu_cpu_valid,
.check = test__sym_event_slash,
/* 6 */
},
{
.name = "cpu/cycles/k",
.valid = test__pmu_cpu_valid,
.check = test__sym_event_dc,
/* 7 */
},
{
.name = "cpu/instructions/uep",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_exclusive_modifier,
/* 8 */
},
{
.name = "{cpu/cycles/,cpu/cache-misses/,cpu/branch-misses/}:e",
.valid = test__pmu_cpu_valid,
.check = test__exclusive_group,
/* 9 */
},
{
.name = "cpu/cycles,name=name/",
.valid = test__pmu_cpu_valid,
.check = test__term_equal_term,
/* 0 */
},
{
.name = "cpu/cycles,name=l1d/",
.valid = test__pmu_cpu_valid,
.check = test__term_equal_legacy,
/* 1 */
},
};
struct terms_test {
const char *str;
int (*check)(struct list_head *terms);
};
static const struct terms_test test__terms[] = {
[0] = {
.str = "config=10,config1,config2=3,config3=4,umask=1,read,r0xead",
.check = test__checkterms_simple,
},
};
static int test_event(const struct evlist_test *e)
{
struct parse_events_error err;
struct evlist *evlist;
int ret;
if (e->valid && !e->valid()) {
pr_debug("... SKIP\n");
return TEST_OK;
}
evlist = evlist__new();
if (evlist == NULL) {
pr_err("Failed allocation");
return TEST_FAIL;
}
parse_events_error__init(&err);
ret = parse_events(evlist, e->name, &err);
if (ret) {
pr_debug("failed to parse event '%s', err %d, str '%s'\n",
e->name, ret, err.str);
parse_events_error__print(&err, e->name);
ret = TEST_FAIL;
if (err.str && strstr(err.str, "can't access trace events"))
ret = TEST_SKIP;
} else {
ret = e->check(evlist);
}
parse_events_error__exit(&err);
evlist__delete(evlist);
return ret;
}
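/* Parse an event string against the fake test PMU instead of the PMUs on the running system. */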
static int test_event_fake_pmu(const char *str)
{
struct parse_events_error err;
struct evlist *evlist;
int ret;
evlist = evlist__new();
if (!evlist)
return -ENOMEM;
parse_events_error__init(&err);
ret = __parse_events(evlist, str, /*pmu_filter=*/NULL, &err,
&perf_pmu__fake, /*warn_if_reordered=*/true);
if (ret) {
pr_debug("failed to parse event '%s', err %d, str '%s'\n",
str, ret, err.str);
parse_events_error__print(&err, str);
}
parse_events_error__exit(&err);
evlist__delete(evlist);
return ret;
}
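/* Merge test results: FAIL dominates everything, and SKIP is preserved over OK. */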
static int combine_test_results(int existing, int latest)
{
if (existing == TEST_FAIL)
return TEST_FAIL;
if (existing == TEST_SKIP)
return latest == TEST_OK ? TEST_SKIP : latest;
return latest;
}
static int test_events(const struct evlist_test *events, int cnt)
{
int ret = TEST_OK;
for (int i = 0; i < cnt; i++) {
const struct evlist_test *e = &events[i];
int test_ret;
pr_debug("running test %d '%s'\n", i, e->name);
test_ret = test_event(e);
if (test_ret != TEST_OK) {
pr_debug("Event test failure: test %d '%s'", i, e->name);
ret = combine_test_results(ret, test_ret);
}
}
return ret;
}
static int test__events2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
return test_events(test__events, ARRAY_SIZE(test__events));
}
static int test_term(const struct terms_test *t)
{
struct list_head terms;
int ret;
INIT_LIST_HEAD(&terms);
ret = parse_events_terms(&terms, t->str, /*input=*/ NULL);
if (ret) {
pr_debug("failed to parse terms '%s', err %d\n",
t->str , ret);
return ret;
}
ret = t->check(&terms);
parse_events_terms__purge(&terms);
return ret;
}
static int test_terms(const struct terms_test *terms, int cnt)
{
int ret = 0;
for (int i = 0; i < cnt; i++) {
const struct terms_test *t = &terms[i];
pr_debug("running test %d '%s'\n", i, t->str);
ret = test_term(t);
if (ret)
break;
}
return ret;
}
static int test__terms2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
return test_terms(test__terms, ARRAY_SIZE(test__terms));
}
static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_pmu *pmu = NULL;
int ret = TEST_OK;
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
struct stat st;
char path[PATH_MAX];
struct dirent *ent;
DIR *dir;
int err;
snprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/events/",
sysfs__mountpoint(), pmu->name);
err = stat(path, &st);
if (err) {
pr_debug("skipping PMU %s events tests: %s\n", pmu->name, path);
continue;
}
dir = opendir(path);
if (!dir) {
pr_debug("can't open pmu event dir: %s\n", path);
ret = combine_test_results(ret, TEST_SKIP);
continue;
}
while ((ent = readdir(dir))) {
struct evlist_test e = { .name = NULL, };
char name[2 * NAME_MAX + 1 + 12 + 3];
int test_ret;
/* Names containing . are special and cannot be used directly */
if (strchr(ent->d_name, '.'))
continue;
snprintf(name, sizeof(name), "%s/event=%s/u", pmu->name, ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events;
test_ret = test_event(&e);
if (test_ret != TEST_OK) {
pr_debug("Test PMU event failed for '%s'", name);
ret = combine_test_results(ret, test_ret);
}
if (!is_pmu_core(pmu->name))
continue;
/*
* Names containing '-' are recognized as prefixes and suffixes
* due to '-' being a legacy PMU separator. This fails when the
* prefix or suffix collides with an existing legacy token. For
* example, branch-brs has a prefix (branch) that collides with
* a PE_NAME_CACHE_TYPE token causing a parse error as a suffix
* isn't expected after this. Event names placed inside the PMU's
* config slashes are allowed to contain '-', which the test built
* above already covers.
*/
if (strchr(ent->d_name, '-'))
continue;
snprintf(name, sizeof(name), "%s:u,%s/event=%s/u",
ent->d_name, pmu->name, ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events_mix;
test_ret = test_event(&e);
if (test_ret != TEST_OK) {
pr_debug("Test PMU event failed for '%s'", name);
ret = combine_test_results(ret, test_ret);
}
}
closedir(dir);
}
return ret;
}
static int test__pmu_events2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
return test_events(test__events_pmu, ARRAY_SIZE(test__events_pmu));
}
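/* Look for a PMU in sysfs that exposes an "alias" file; return its name and the alias contents via the out parameters. */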
static bool test_alias(char **event, char **alias)
{
char path[PATH_MAX];
DIR *dir;
struct dirent *dent;
const char *sysfs = sysfs__mountpoint();
char buf[128];
FILE *file;
if (!sysfs)
return false;
snprintf(path, PATH_MAX, "%s/bus/event_source/devices/", sysfs);
dir = opendir(path);
if (!dir)
return false;
while ((dent = readdir(dir))) {
if (!strcmp(dent->d_name, ".") ||
!strcmp(dent->d_name, ".."))
continue;
snprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/alias",
sysfs, dent->d_name);
if (!file_available(path))
continue;
file = fopen(path, "r");
if (!file)
continue;
if (!fgets(buf, sizeof(buf), file)) {
fclose(file);
continue;
}
/* Remove the last '\n' */
buf[strlen(buf) - 1] = 0;
fclose(file);
*event = strdup(dent->d_name);
*alias = strdup(buf);
closedir(dir);
if (*event == NULL || *alias == NULL) {
free(*event);
free(*alias);
return false;
}
return true;
}
closedir(dir);
return false;
}
static int test__checkevent_pmu_events_alias(struct evlist *evlist)
{
struct evsel *evsel1 = evlist__first(evlist);
struct evsel *evsel2 = evlist__last(evlist);
TEST_ASSERT_VAL("wrong type", evsel1->core.attr.type == evsel2->core.attr.type);
TEST_ASSERT_VAL("wrong config", evsel1->core.attr.config == evsel2->core.attr.config);
return TEST_OK;
}
static int test__pmu_events_alias(char *event, char *alias)
{
struct evlist_test e = { .name = NULL, };
char name[2 * NAME_MAX + 20];
snprintf(name, sizeof(name), "%s/event=1/,%s/event=1/",
event, alias);
e.name = name;
e.check = test__checkevent_pmu_events_alias;
return test_event(&e);
}
static int test__alias(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char *event, *alias;
int ret;
if (!test_alias(&event, &alias))
return TEST_SKIP;
ret = test__pmu_events_alias(event, alias);
free(event);
free(alias);
return ret;
}
static int test__pmu_events_alias2(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
static const char events[][30] = {
"event-hyphen",
"event-two-hyph",
};
int ret = TEST_OK;
for (unsigned int i = 0; i < ARRAY_SIZE(events); i++) {
int test_ret = test_event_fake_pmu(&events[i][0]);
if (test_ret != TEST_OK) {
pr_debug("check_parse_fake %s failed\n", &events[i][0]);
ret = combine_test_results(ret, test_ret);
}
}
return ret;
}
static struct test_case tests__parse_events[] = {
TEST_CASE_REASON("Test event parsing",
events2,
"permissions"),
TEST_CASE_REASON("Parsing of all PMU events from sysfs",
pmu_events,
"permissions"),
TEST_CASE_REASON("Parsing of given PMU events from sysfs",
pmu_events2,
"permissions"),
TEST_CASE_REASON("Parsing of aliased events from sysfs", alias,
"no aliases in sysfs"),
TEST_CASE("Parsing of aliased events", pmu_events_alias2),
TEST_CASE("Parsing of terms (event modifiers)", terms2),
{ .name = NULL, }
};
struct test_suite suite__parse_events = {
.desc = "Parse event definition strings",
.test_cases = tests__parse_events,
};
| linux-master | tools/perf/tests/parse-events.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include <internal/cpumap.h>
#include "debug.h"
#include "env.h"
#include "mem2node.h"
#include "tests.h"
static struct node {
int node;
const char *map;
} test_nodes[] = {
{ .node = 0, .map = "0" },
{ .node = 1, .map = "1-2" },
{ .node = 3, .map = "5-7,9" },
};
#define T TEST_ASSERT_VAL
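/* Turn a CPU list string such as "1-2" or "5-7,9" into a freshly allocated bitmap of nbits bits. */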
static unsigned long *get_bitmap(const char *str, int nbits)
{
struct perf_cpu_map *map = perf_cpu_map__new(str);
unsigned long *bm = NULL;
bm = bitmap_zalloc(nbits);
if (map && bm) {
struct perf_cpu cpu;
int i;
perf_cpu_map__for_each_cpu(cpu, i, map)
__set_bit(cpu.cpu, bm);
}
if (map)
perf_cpu_map__put(map);
else
free(bm);
return bm && map ? bm : NULL;
}
static int test__mem2node(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
{
struct mem2node map;
struct memory_node nodes[3];
struct perf_env env = {
.memory_nodes = (struct memory_node *) &nodes[0],
.nr_memory_nodes = ARRAY_SIZE(nodes),
.memory_bsize = 0x100,
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(nodes); i++) {
nodes[i].node = test_nodes[i].node;
nodes[i].size = 10;
T("failed: alloc bitmap",
(nodes[i].set = get_bitmap(test_nodes[i].map, 10)));
}
T("failed: mem2node__init", !mem2node__init(&map, &env));
T("failed: mem2node__node", 0 == mem2node__node(&map, 0x50));
T("failed: mem2node__node", 1 == mem2node__node(&map, 0x100));
T("failed: mem2node__node", 1 == mem2node__node(&map, 0x250));
T("failed: mem2node__node", 3 == mem2node__node(&map, 0x500));
T("failed: mem2node__node", 3 == mem2node__node(&map, 0x650));
T("failed: mem2node__node", -1 == mem2node__node(&map, 0x450));
T("failed: mem2node__node", -1 == mem2node__node(&map, 0x1050));
for (i = 0; i < ARRAY_SIZE(nodes); i++)
zfree(&nodes[i].set);
mem2node__exit(&map);
return 0;
}
DEFINE_SUITE("mem2node", mem2node);
| linux-master | tools/perf/tests/mem2node.c |
// SPDX-License-Identifier: GPL-2.0
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include "tests.h"
#include "debug.h"
#include <linux/kernel.h>
static int perf_evsel__roundtrip_cache_name_test(void)
{
int ret = TEST_OK;
for (int type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
for (int op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
/* skip invalid cache type */
if (!evsel__is_cache_op_valid(type, op))
continue;
for (int res = 0; res < PERF_COUNT_HW_CACHE_RESULT_MAX; res++) {
char name[128];
struct evlist *evlist = evlist__new();
struct evsel *evsel;
int err;
if (evlist == NULL) {
pr_debug("Failed to alloc evlist");
return TEST_FAIL;
}
__evsel__hw_cache_type_op_res_name(type, op, res,
name, sizeof(name));
err = parse_event(evlist, name);
if (err) {
pr_debug("Failure to parse cache event '%s' possibly as PMUs don't support it",
name);
evlist__delete(evlist);
continue;
}
evlist__for_each_entry(evlist, evsel) {
if (strcmp(evsel__name(evsel), name)) {
pr_debug("%s != %s\n", evsel__name(evsel), name);
ret = TEST_FAIL;
}
}
evlist__delete(evlist);
}
}
}
return ret;
}
static int perf_evsel__name_array_test(const char *const names[], int nr_names)
{
int ret = TEST_OK;
for (int i = 0; i < nr_names; ++i) {
struct evlist *evlist = evlist__new();
struct evsel *evsel;
int err;
if (evlist == NULL) {
pr_debug("Failed to alloc evlist");
return TEST_FAIL;
}
err = parse_event(evlist, names[i]);
if (err) {
pr_debug("failed to parse event '%s', err %d\n",
names[i], err);
evlist__delete(evlist);
ret = TEST_FAIL;
continue;
}
evlist__for_each_entry(evlist, evsel) {
if (strcmp(evsel__name(evsel), names[i])) {
pr_debug("%s != %s\n", evsel__name(evsel), names[i]);
ret = TEST_FAIL;
}
}
evlist__delete(evlist);
}
return ret;
}
static int test__perf_evsel__roundtrip_name_test(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
int err = 0, ret = TEST_OK;
err = perf_evsel__name_array_test(evsel__hw_names, PERF_COUNT_HW_MAX);
if (err)
ret = err;
err = perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1);
if (err)
ret = err;
err = perf_evsel__roundtrip_cache_name_test();
if (err)
ret = err;
return ret;
}
DEFINE_SUITE("Roundtrip evsel->name", perf_evsel__roundtrip_name_test);
| linux-master | tools/perf/tests/evsel-roundtrip-name.c |
// SPDX-License-Identifier: GPL-2.0
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "target.h"
#include "thread_map.h"
#include "tests.h"
#include "util/mmap.h"
#include <errno.h>
#include <signal.h>
#include <linux/string.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>
static int exited;
static int nr_exit;
static void sig_handler(int sig __maybe_unused)
{
exited = 1;
}
/*
* evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
* we asked for that by setting its exec_error to this handler.
*/
static void workload_exec_failed_signal(int signo __maybe_unused,
siginfo_t *info __maybe_unused,
void *ucontext __maybe_unused)
{
exited = 1;
nr_exit = -1;
}
/*
* This test starts a workload that does nothing, then checks that
* the kernel reports exactly one exit event for it.
*/
static int test__task_exit(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = -1;
union perf_event *event;
struct evsel *evsel;
struct evlist *evlist;
struct target target = {
.uid = UINT_MAX,
.uses_mmap = true,
};
const char *argv[] = { "true", NULL };
char sbuf[STRERR_BUFSIZE];
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
struct mmap *md;
int retry_count = 0;
signal(SIGCHLD, sig_handler);
evlist = evlist__new_dummy();
if (evlist == NULL) {
pr_debug("evlist__new_dummy\n");
return -1;
}
/*
* Create maps of threads and cpus to monitor. In this case
* we start with all threads and cpus (-1, -1) but then in
* evlist__prepare_workload we'll fill in the only thread
* we're monitoring, the one forked there.
*/
cpus = perf_cpu_map__dummy_new();
threads = thread_map__new_by_tid(-1);
if (!cpus || !threads) {
err = -ENOMEM;
pr_debug("Not enough memory to create thread/cpu maps\n");
goto out_delete_evlist;
}
perf_evlist__set_maps(&evlist->core, cpus, threads);
err = evlist__prepare_workload(evlist, &target, argv, false, workload_exec_failed_signal);
if (err < 0) {
pr_debug("Couldn't run the workload!\n");
goto out_delete_evlist;
}
evsel = evlist__first(evlist);
evsel->core.attr.task = 1;
#ifdef __s390x__
evsel->core.attr.sample_freq = 1000000;
#else
evsel->core.attr.sample_freq = 1;
#endif
evsel->core.attr.inherit = 0;
evsel->core.attr.watermark = 0;
evsel->core.attr.wakeup_events = 1;
evsel->core.attr.exclude_kernel = 1;
err = evlist__open(evlist);
if (err < 0) {
pr_debug("Couldn't open the evlist: %s\n",
str_error_r(-err, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
if (evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
err = -1;
goto out_delete_evlist;
}
evlist__start_workload(evlist);
retry:
md = &evlist->mmap[0];
if (perf_mmap__read_init(&md->core) < 0)
goto out_init;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
if (event->header.type == PERF_RECORD_EXIT)
nr_exit++;
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(&md->core);
out_init:
if (!exited || !nr_exit) {
evlist__poll(evlist, -1);
if (retry_count++ > 1000) {
pr_debug("Failed after retrying 1000 times\n");
err = -1;
goto out_delete_evlist;
}
goto retry;
}
if (nr_exit != 1) {
pr_debug("received %d EXIT records\n", nr_exit);
err = -1;
}
out_delete_evlist:
perf_cpu_map__put(cpus);
perf_thread_map__put(threads);
evlist__delete(evlist);
return err;
}
DEFINE_SUITE("Number of exit events of a simple workload", task_exit);
| linux-master | tools/perf/tests/task-exit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-test.c
*
* Builtin regression testing command: ever growing number of sanity tests
*/
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include "builtin.h"
#include "hist.h"
#include "intlist.h"
#include "tests.h"
#include "debug.h"
#include "color.h"
#include <subcmd/parse-options.h>
#include "string2.h"
#include "symbol.h"
#include "util/rlimit.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>
#include "builtin-test-list.h"
static bool dont_fork;
const char *dso_to_test;
/*
* List of architecture specific tests. Not a weak symbol as the array length is
* dependent on the initialization, as such GCC with LTO complains of
* conflicting definitions with a weak symbol.
*/
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
static struct test_suite *arch_tests[] = {
NULL,
};
#endif
static struct test_suite *generic_tests[] = {
&suite__vmlinux_matches_kallsyms,
#ifdef HAVE_LIBTRACEEVENT
&suite__openat_syscall_event,
&suite__openat_syscall_event_on_all_cpus,
&suite__basic_mmap,
#endif
&suite__mem,
&suite__parse_events,
&suite__expr,
&suite__PERF_RECORD,
&suite__pmu,
&suite__pmu_events,
&suite__dso_data,
&suite__dso_data_cache,
&suite__dso_data_reopen,
&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
&suite__perf_evsel__tp_sched_test,
&suite__syscall_openat_tp_fields,
#endif
&suite__attr,
&suite__hists_link,
&suite__python_use,
&suite__bp_signal,
&suite__bp_signal_overflow,
&suite__bp_accounting,
&suite__wp,
&suite__task_exit,
&suite__sw_clock_freq,
&suite__code_reading,
&suite__sample_parsing,
&suite__keep_tracking,
&suite__parse_no_sample_id_all,
&suite__hists_filter,
&suite__mmap_thread_lookup,
&suite__thread_maps_share,
&suite__hists_output,
&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
&suite__switch_tracking,
#endif
&suite__fdarray__filter,
&suite__fdarray__add,
&suite__kmod_path__parse,
&suite__thread_map,
&suite__session_topology,
&suite__thread_map_synthesize,
&suite__thread_map_remove,
&suite__cpu_map,
&suite__synthesize_stat_config,
&suite__synthesize_stat,
&suite__synthesize_stat_round,
&suite__event_update,
&suite__event_times,
&suite__backward_ring_buffer,
&suite__sdt_event,
&suite__is_printable_array,
&suite__bitmap_print,
&suite__perf_hooks,
&suite__unit_number__scnprint,
&suite__mem2node,
&suite__time_utils,
&suite__jit_write_elf,
&suite__pfm,
&suite__api_io,
&suite__maps__merge_in,
&suite__demangle_java,
&suite__demangle_ocaml,
&suite__parse_metric,
&suite__pe_file_parsing,
&suite__expand_cgroup_events,
&suite__perf_time_to_tsc,
&suite__dlfilter,
&suite__sigtrap,
&suite__event_groups,
&suite__symbols,
NULL,
};
static struct test_suite **tests[] = {
generic_tests,
arch_tests,
};
static struct test_workload *workloads[] = {
&workload__noploop,
&workload__thloop,
&workload__leafloop,
&workload__sqrtloop,
&workload__brstack,
&workload__datasym,
};
static int num_subtests(const struct test_suite *t)
{
int num;
if (!t->test_cases)
return 0;
num = 0;
while (t->test_cases[num].name)
num++;
return num;
}
static bool has_subtests(const struct test_suite *t)
{
return num_subtests(t) > 1;
}
static const char *skip_reason(const struct test_suite *t, int subtest)
{
if (!t->test_cases)
return NULL;
return t->test_cases[subtest >= 0 ? subtest : 0].skip_reason;
}
static const char *test_description(const struct test_suite *t, int subtest)
{
if (t->test_cases && subtest >= 0)
return t->test_cases[subtest].desc;
return t->desc;
}
static test_fnptr test_function(const struct test_suite *t, int subtest)
{
if (subtest <= 0)
return t->test_cases[0].run_case;
return t->test_cases[subtest].run_case;
}
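/* With no selectors every test matches; otherwise match on the 1-based test number or a case-insensitive substring of the description. */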
static bool perf_test__matches(const char *desc, int curr, int argc, const char *argv[])
{
int i;
if (argc == 0)
return true;
for (i = 0; i < argc; ++i) {
char *end;
long nr = strtoul(argv[i], &end, 10);
if (*end == '\0') {
if (nr == curr + 1)
return true;
continue;
}
if (strcasestr(desc, argv[i]))
return true;
}
return false;
}
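/* Run a single (sub)test, by default in a forked child so a crashing test cannot bring down the harness; -F/--dont-fork runs it in-process. */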
static int run_test(struct test_suite *test, int subtest)
{
int status, err = -1, child = dont_fork ? 0 : fork();
char sbuf[STRERR_BUFSIZE];
if (child < 0) {
pr_err("failed to fork test: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
return -1;
}
if (!child) {
if (!dont_fork) {
pr_debug("test child forked, pid %d\n", getpid());
if (verbose <= 0) {
int nullfd = open("/dev/null", O_WRONLY);
if (nullfd >= 0) {
close(STDERR_FILENO);
close(STDOUT_FILENO);
dup2(nullfd, STDOUT_FILENO);
dup2(STDOUT_FILENO, STDERR_FILENO);
close(nullfd);
}
} else {
signal(SIGSEGV, sighandler_dump_stack);
signal(SIGFPE, sighandler_dump_stack);
}
}
err = test_function(test, subtest)(test, subtest);
if (!dont_fork)
exit(err);
}
if (!dont_fork) {
wait(&status);
if (WIFEXITED(status)) {
err = (signed char)WEXITSTATUS(status);
pr_debug("test child finished with %d\n", err);
} else if (WIFSIGNALED(status)) {
err = -1;
pr_debug("test child interrupted\n");
}
}
return err;
}
#define for_each_test(j, k, t) \
for (j = 0, k = 0; j < ARRAY_SIZE(tests); j++, k = 0) \
while ((t = tests[j][k++]) != NULL)
static int test_and_print(struct test_suite *t, int subtest)
{
int err;
pr_debug("\n--- start ---\n");
err = run_test(t, subtest);
pr_debug("---- end ----\n");
if (!has_subtests(t))
pr_debug("%s:", t->desc);
else
pr_debug("%s subtest %d:", t->desc, subtest + 1);
switch (err) {
case TEST_OK:
pr_info(" Ok\n");
break;
case TEST_SKIP: {
const char *reason = skip_reason(t, subtest);
if (reason)
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
else
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
}
break;
case TEST_FAIL:
default:
color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
break;
}
return err;
}
struct shell_test {
const char *dir;
const char *file;
};
static int shell_test__run(struct test_suite *test, int subdir __maybe_unused)
{
int err;
char script[PATH_MAX];
struct shell_test *st = test->priv;
path__join(script, sizeof(script) - 3, st->dir, st->file);
if (verbose > 0)
strncat(script, " -v", sizeof(script) - strlen(script) - 1);
err = system(script);
if (!err)
return TEST_OK;
return WEXITSTATUS(err) == 2 ? TEST_SKIP : TEST_FAIL;
}
static int run_shell_tests(int argc, const char *argv[], int i, int width,
struct intlist *skiplist)
{
struct shell_test st;
const struct script_file *files, *file;
files = list_script_files();
if (!files)
return 0;
for (file = files; file->dir; file++) {
int curr = i++;
struct test_case test_cases[] = {
{
.desc = file->desc,
.run_case = shell_test__run,
},
{ .name = NULL, }
};
struct test_suite test_suite = {
.desc = test_cases[0].desc,
.test_cases = test_cases,
.priv = &st,
};
st.dir = file->dir;
if (test_suite.desc == NULL ||
!perf_test__matches(test_suite.desc, curr, argc, argv))
continue;
st.file = file->file;
pr_info("%3d: %-*s:", i, width, test_suite.desc);
if (intlist__find(skiplist, i)) {
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
continue;
}
test_and_print(&test_suite, 0);
}
return 0;
}
static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
{
struct test_suite *t;
unsigned int j, k;
int i = 0;
int width = list_script_max_width();
for_each_test(j, k, t) {
int len = strlen(test_description(t, -1));
if (width < len)
width = len;
}
for_each_test(j, k, t) {
int curr = i++;
int subi;
if (!perf_test__matches(test_description(t, -1), curr, argc, argv)) {
bool skip = true;
int subn;
subn = num_subtests(t);
for (subi = 0; subi < subn; subi++) {
if (perf_test__matches(test_description(t, subi),
curr, argc, argv))
skip = false;
}
if (skip)
continue;
}
pr_info("%3d: %-*s:", i, width, test_description(t, -1));
if (intlist__find(skiplist, i)) {
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
continue;
}
if (!has_subtests(t)) {
test_and_print(t, -1);
} else {
int subn = num_subtests(t);
/*
* minus 2 to align with normal testcases.
* For subtest we print additional '.x' in number.
* for example:
*
* 35: Test LLVM searching and compiling :
* 35.1: Basic BPF llvm compiling test : Ok
*/
int subw = width > 2 ? width - 2 : width;
if (subn <= 0) {
color_fprintf(stderr, PERF_COLOR_YELLOW,
" Skip (not compiled in)\n");
continue;
}
pr_info("\n");
for (subi = 0; subi < subn; subi++) {
int len = strlen(test_description(t, subi));
if (subw < len)
subw = len;
}
for (subi = 0; subi < subn; subi++) {
if (!perf_test__matches(test_description(t, subi),
curr, argc, argv))
continue;
pr_info("%3d.%1d: %-*s:", i, subi + 1, subw,
test_description(t, subi));
test_and_print(t, subi);
}
}
}
return run_shell_tests(argc, argv, i, width, skiplist);
}
static int perf_test__list_shell(int argc, const char **argv, int i)
{
const struct script_file *files, *file;
files = list_script_files();
if (!files)
return 0;
for (file = files; file->dir; file++) {
int curr = i++;
struct test_suite t = {
.desc = file->desc
};
if (!perf_test__matches(t.desc, curr, argc, argv))
continue;
pr_info("%3d: %s\n", i, t.desc);
}
return 0;
}
static int perf_test__list(int argc, const char **argv)
{
unsigned int j, k;
struct test_suite *t;
int i = 0;
for_each_test(j, k, t) {
int curr = i++;
if (!perf_test__matches(test_description(t, -1), curr, argc, argv))
continue;
pr_info("%3d: %s\n", i, test_description(t, -1));
if (has_subtests(t)) {
int subn = num_subtests(t);
int subi;
for (subi = 0; subi < subn; subi++)
pr_info("%3d:%1d: %s\n", i, subi + 1,
test_description(t, subi));
}
}
perf_test__list_shell(argc, argv, i);
return 0;
}
static int run_workload(const char *work, int argc, const char **argv)
{
unsigned int i = 0;
struct test_workload *twl;
for (i = 0; i < ARRAY_SIZE(workloads); i++) {
twl = workloads[i];
if (!strcmp(twl->name, work))
return twl->func(argc, argv);
}
pr_info("No workload found: %s\n", work);
return -1;
}
int cmd_test(int argc, const char **argv)
{
const char *test_usage[] = {
"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
NULL,
};
const char *skip = NULL;
const char *workload = NULL;
const struct option test_options[] = {
OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('F', "dont-fork", &dont_fork,
"Do not fork for testcase"),
OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"),
OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
OPT_END()
};
const char * const test_subcommands[] = { "list", NULL };
struct intlist *skiplist = NULL;
int ret = hists__init();
if (ret < 0)
return ret;
/* Unbuffered output */
setvbuf(stdout, NULL, _IONBF, 0);
argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
if (argc >= 1 && !strcmp(argv[0], "list"))
return perf_test__list(argc - 1, argv + 1);
if (workload)
return run_workload(workload, argc, argv);
symbol_conf.priv_size = sizeof(int);
symbol_conf.try_vmlinux_path = true;
if (symbol__init(NULL) < 0)
return -1;
if (skip != NULL)
skiplist = intlist__new(skip);
/*
* Tests that create BPF maps, for instance, need more than the 64K
* default:
*/
rlimit__bump_memlock();
return __cmd_test(argc, argv, skiplist);
}
| linux-master | tools/perf/tests/builtin-test.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/string.h>
#include <sys/mman.h>
#include <limits.h>
#include "debug.h"
#include "dso.h"
#include "machine.h"
#include "thread.h"
#include "symbol.h"
#include "map.h"
#include "util.h"
#include "tests.h"
struct test_info {
struct machine *machine;
struct thread *thread;
};
static int init_test_info(struct test_info *ti)
{
ti->machine = machine__new_host();
if (!ti->machine) {
pr_debug("machine__new_host() failed!\n");
return TEST_FAIL;
}
/* Create a dummy thread */
ti->thread = machine__findnew_thread(ti->machine, 100, 100);
if (!ti->thread) {
pr_debug("machine__findnew_thread() failed!\n");
return TEST_FAIL;
}
return TEST_OK;
}
static void exit_test_info(struct test_info *ti)
{
thread__put(ti->thread);
machine__delete(ti->machine);
}
static void get_test_dso_filename(char *filename, size_t max_sz)
{
if (dso_to_test)
strlcpy(filename, dso_to_test, max_sz);
else
perf_exe(filename, max_sz);
}
static int create_map(struct test_info *ti, char *filename, struct map **map_p)
{
/* Create a dummy map at 0x100000 */
*map_p = map__new(ti->machine, 0x100000, 0xffffffff, 0, NULL,
PROT_EXEC, 0, NULL, filename, ti->thread);
if (!*map_p) {
pr_debug("Failed to create map!");
return TEST_FAIL;
}
return TEST_OK;
}
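/* Check the loaded symbols: function symbols must not overlap and must not be zero length. */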
static int test_dso(struct dso *dso)
{
struct symbol *last_sym = NULL;
struct rb_node *nd;
int ret = TEST_OK;
/* dso__fprintf() prints all the symbols */
if (verbose > 1)
dso__fprintf(dso, stderr);
for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
if (sym->type != STT_FUNC && sym->type != STT_GNU_IFUNC)
continue;
/* Check for overlapping function symbols */
if (last_sym && sym->start < last_sym->end) {
pr_debug("Overlapping symbols:\n");
symbol__fprintf(last_sym, stderr);
symbol__fprintf(sym, stderr);
ret = TEST_FAIL;
}
/* Check for zero-length function symbol */
if (sym->start == sym->end) {
pr_debug("Zero-length symbol:\n");
symbol__fprintf(sym, stderr);
ret = TEST_FAIL;
}
last_sym = sym;
}
return ret;
}
static int test_file(struct test_info *ti, char *filename)
{
struct map *map = NULL;
int ret, nr;
struct dso *dso;
pr_debug("Testing %s\n", filename);
ret = create_map(ti, filename, &map);
if (ret != TEST_OK)
return ret;
dso = map__dso(map);
nr = dso__load(dso, map);
if (nr < 0) {
pr_debug("dso__load() failed!\n");
ret = TEST_FAIL;
goto out_put;
}
if (nr == 0) {
pr_debug("DSO has no symbols!\n");
ret = TEST_SKIP;
goto out_put;
}
ret = test_dso(dso);
out_put:
map__put(map);
return ret;
}
static int test__symbols(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char filename[PATH_MAX];
struct test_info ti;
int ret;
ret = init_test_info(&ti);
if (ret != TEST_OK)
return ret;
get_test_dso_filename(filename, sizeof(filename));
ret = test_file(&ti, filename);
exit_test_info(&ti);
return ret;
}
DEFINE_SUITE("Symbols", symbols);
| linux-master | tools/perf/tests/symbols.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include "event.h"
#include "tests.h"
#include "stat.h"
#include "counts.h"
#include "debug.h"
#include "util/synthetic-events.h"
static bool has_term(struct perf_record_stat_config *config,
u64 tag, u64 val)
{
unsigned i;
for (i = 0; i < config->nr; i++) {
if ((config->data[i].tag == tag) &&
(config->data[i].val == val))
return true;
}
return false;
}
static int process_stat_config_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_record_stat_config *config = &event->stat_config;
struct perf_stat_config stat_config = {};
#define HAS(term, val) \
has_term(config, PERF_STAT_CONFIG_TERM__##term, val)
TEST_ASSERT_VAL("wrong nr", config->nr == PERF_STAT_CONFIG_TERM__MAX);
TEST_ASSERT_VAL("wrong aggr_mode", HAS(AGGR_MODE, AGGR_CORE));
TEST_ASSERT_VAL("wrong scale", HAS(SCALE, 1));
TEST_ASSERT_VAL("wrong interval", HAS(INTERVAL, 1));
#undef HAS
perf_event__read_stat_config(&stat_config, config);
TEST_ASSERT_VAL("wrong aggr_mode", stat_config.aggr_mode == AGGR_CORE);
TEST_ASSERT_VAL("wrong scale", stat_config.scale == 1);
TEST_ASSERT_VAL("wrong interval", stat_config.interval == 1);
return 0;
}
static int test__synthesize_stat_config(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
struct perf_stat_config stat_config = {
.aggr_mode = AGGR_CORE,
.scale = 1,
.interval = 1,
};
TEST_ASSERT_VAL("failed to synthesize stat_config",
!perf_event__synthesize_stat_config(NULL, &stat_config, process_stat_config_event, NULL));
return 0;
}
static int process_stat_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_record_stat *st = &event->stat;
TEST_ASSERT_VAL("wrong cpu", st->cpu == 1);
TEST_ASSERT_VAL("wrong thread", st->thread == 2);
TEST_ASSERT_VAL("wrong id", st->id == 3);
TEST_ASSERT_VAL("wrong val", st->val == 100);
TEST_ASSERT_VAL("wrong run", st->ena == 200);
TEST_ASSERT_VAL("wrong ena", st->run == 300);
return 0;
}
static int test__synthesize_stat(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_counts_values count;
count.val = 100;
count.ena = 200;
count.run = 300;
TEST_ASSERT_VAL("failed to synthesize stat_config",
!perf_event__synthesize_stat(NULL, (struct perf_cpu){.cpu = 1}, 2, 3,
&count, process_stat_event, NULL));
return 0;
}
static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_record_stat_round *stat_round = &event->stat_round;
TEST_ASSERT_VAL("wrong time", stat_round->time == 0xdeadbeef);
TEST_ASSERT_VAL("wrong type", stat_round->type == PERF_STAT_ROUND_TYPE__INTERVAL);
return 0;
}
static int test__synthesize_stat_round(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
TEST_ASSERT_VAL("failed to synthesize stat_config",
!perf_event__synthesize_stat_round(NULL, 0xdeadbeef, PERF_STAT_ROUND_TYPE__INTERVAL,
process_stat_round_event, NULL));
return 0;
}
DEFINE_SUITE("Synthesize stat config", synthesize_stat_config);
DEFINE_SUITE("Synthesize stat", synthesize_stat);
DEFINE_SUITE("Synthesize stat round", synthesize_stat_round);
| linux-master | tools/perf/tests/stat.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <subcmd/exec-cmd.h>
#include "debug.h"
#include "util/build-id.h"
#include "util/symbol.h"
#include "util/dso.h"
#include "tests.h"
#ifdef HAVE_LIBBFD_SUPPORT
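/* Check build-id, debuglink and BFD symbol loading for the pe-file.exe test binary found in directory d. */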
static int run_dir(const char *d)
{
char filename[PATH_MAX];
char debugfile[PATH_MAX];
struct build_id bid;
char debuglink[PATH_MAX];
char expect_build_id[] = {
0x5a, 0x0f, 0xd8, 0x82, 0xb5, 0x30, 0x84, 0x22,
0x4b, 0xa4, 0x7b, 0x62, 0x4c, 0x55, 0xa4, 0x69,
};
char expect_debuglink[PATH_MAX] = "pe-file.exe.debug";
struct dso *dso;
struct symbol *sym;
int ret;
size_t idx;
scnprintf(filename, PATH_MAX, "%s/pe-file.exe", d);
ret = filename__read_build_id(filename, &bid);
TEST_ASSERT_VAL("Failed to read build_id",
ret == sizeof(expect_build_id));
TEST_ASSERT_VAL("Wrong build_id", !memcmp(bid.data, expect_build_id,
sizeof(expect_build_id)));
ret = filename__read_debuglink(filename, debuglink, PATH_MAX);
TEST_ASSERT_VAL("Failed to read debuglink", ret == 0);
TEST_ASSERT_VAL("Wrong debuglink",
!strcmp(debuglink, expect_debuglink));
scnprintf(debugfile, PATH_MAX, "%s/%s", d, debuglink);
ret = filename__read_build_id(debugfile, &bid);
TEST_ASSERT_VAL("Failed to read debug file build_id",
ret == sizeof(expect_build_id));
TEST_ASSERT_VAL("Wrong build_id", !memcmp(bid.data, expect_build_id,
sizeof(expect_build_id)));
dso = dso__new(filename);
TEST_ASSERT_VAL("Failed to get dso", dso);
ret = dso__load_bfd_symbols(dso, debugfile);
TEST_ASSERT_VAL("Failed to load symbols", ret == 0);
dso__sort_by_name(dso);
sym = dso__find_symbol_by_name(dso, "main", &idx);
TEST_ASSERT_VAL("Failed to find main", sym);
dso__delete(dso);
return TEST_OK;
}
static int test__pe_file_parsing(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
struct stat st;
char path_dir[PATH_MAX];
/* First try development tree tests. */
if (!lstat("./tests", &st))
return run_dir("./tests");
/* Then installed path. */
snprintf(path_dir, PATH_MAX, "%s/tests", get_argv_exec_path());
if (!lstat(path_dir, &st))
return run_dir(path_dir);
return TEST_SKIP;
}
#else
static int test__pe_file_parsing(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
return TEST_SKIP;
}
#endif
DEFINE_SUITE("PE file support", pe_file_parsing);
| linux-master | tools/perf/tests/pe-file-parsing.c |